Merge "drivers: cpuidle: lpm_levels: Correctly update lpm stats"
diff --git a/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt b/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
index 72c46f2..99f8c4c 100644
--- a/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
+++ b/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
@@ -164,10 +164,7 @@
 					"dfps_immediate_porch_mode_vfp" = FPS change request is
 					implemented immediately by changing panel vertical
 					front porch values.
-- qcom,min-refresh-rate:		Minimum refresh rate supported by the panel.
-- qcom,max-refresh-rate:		Maximum refresh rate supported by the panel. If max refresh
-					rate is not specified, then the frame rate of the panel in
-					qcom,mdss-dsi-panel-framerate is used.
+- qcom,dsi-supported-dfps-list:		List containing all the supported refresh rates.
 - qcom,mdss-dsi-bl-pmic-control-type:	A string that specifies the implementation of backlight
 					control for this panel.
 					"bl_ctrl_pwm" = Backlight controlled by PWM gpio.
@@ -527,6 +524,10 @@
 - qcom,mdss-dsi-panel-cmds-only-by-right: Boolean used to mention whether the panel supports DSI1 or
					DSI0 to send commands. If this is set, it means the panel only supports
					DSI1 to send commands; otherwise DSI0 will send commands.
+- qcom,dsi-dyn-clk-enable:		Boolean to indicate dsi dynamic clock switch feature
+					is supported.
+- qcom,dsi-dyn-clk-list:		A u32 array which lists all the supported dsi bit clock
+					frequencies in Hz for the given panel.
 
 Required properties for sub-nodes:	None
 Optional properties:
@@ -649,8 +650,7 @@
 		qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
 		qcom,mdss-dsi-pan-enable-dynamic-fps;
 		qcom,mdss-dsi-pan-fps-update = "dfps_suspend_resume_mode";
-		qcom,min-refresh-rate = <30>;
-		qcom,max-refresh-rate = <60>;
+		qcom,dsi-supported-dfps-list = <48 55 60>;
 		qcom,mdss-dsi-bl-pmic-bank-select = <0>;
 		qcom,mdss-dsi-bl-pmic-pwm-frequency = <0>;
 		qcom,mdss-dsi-pwm-gpio = <&pm8941_mpps 5 0>;
@@ -781,5 +781,7 @@
 			                <2 2 1>;
 		qcom,default-topology-index = <0>;
 		qcom,mdss-dsi-dma-schedule-line = <5>;
+		qcom,dsi-dyn-clk-enable;
+		qcom,dsi-dyn-clk-list = <798240576 801594528 804948480>;
 	};
 };
diff --git a/Documentation/devicetree/bindings/net/macb.txt b/Documentation/devicetree/bindings/net/macb.txt
index 1506e94..d1f435c 100644
--- a/Documentation/devicetree/bindings/net/macb.txt
+++ b/Documentation/devicetree/bindings/net/macb.txt
@@ -10,6 +10,7 @@
   Use "cdns,pc302-gem" for Picochip picoXcell pc302 and later devices based on
   the Cadence GEM, or the generic form: "cdns,gem".
   Use "atmel,sama5d2-gem" for the GEM IP (10/100) available on Atmel sama5d2 SoCs.
+  Use "atmel,sama5d3-macb" for the 10/100Mbit IP available on Atmel sama5d3 SoCs.
   Use "atmel,sama5d3-gem" for the Gigabit IP available on Atmel sama5d3 SoCs.
   Use "atmel,sama5d4-gem" for the GEM IP (10/100) available on Atmel sama5d4 SoCs.
   Use "cdns,zynq-gem" Xilinx Zynq-7xxx SoC.
diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
index 6c9d8253..c517d3c 100644
--- a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
+++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
@@ -1643,6 +1643,16 @@
 		threshold values for different codecs. First parameter is V(voltage)
 		second one is i(current), third one is r (resistance). Depending on the
 		codec set corresponding element in array and set others to 0.
+- qcom,msm-linein-det-swh: This property specifies the linein jack switch
+	type on the target; the switch type will typically be normally open or
+	normally closed. Set this property to 0 for normally closed and 1 for
+	normally open.
+- qcom,msm-lineout-det-swh: This property specifies the lineout jack switch
+	type on the target; the switch type will typically be normally open or
+	normally closed. Set this property to 0 for normally closed and 1 for
+	normally open.
+- qcom,linein-det-gpio : GPIO on which linein jack insertion/removal interrupt is received.
+- qcom,lineout-det-gpio : GPIO on which lineout jack insertion/removal interrupt is received.
 
 Example:
 	 sound {
@@ -1703,6 +1713,10 @@
 				<&wsa881x_213>, <&wsa881x_214>;
 		qcom,wsa-aux-dev-prefix = "SpkrRight", "SpkrLeft",
 					  "SpkrRight", "SpkrLeft";
+		qcom,linein-det-swh = <1>;
+		qcom,lineout-det-swh = <1>;
+		qcom,linein-det-gpio = <&tlmm 124 0>;
+		qcom,lineout-det-gpio = <&tlmm 125 0>;
 	};
 
 * MSM8952 Slimbus ASoC Machine driver
diff --git a/Documentation/filesystems/f2fs.txt b/Documentation/filesystems/f2fs.txt
index 193a034..d9a0f69 100644
--- a/Documentation/filesystems/f2fs.txt
+++ b/Documentation/filesystems/f2fs.txt
@@ -154,6 +154,26 @@
                        enabled by default.
 data_flush             Enable data flushing before checkpoint in order to
                        persist data of regular and symlink.
+fault_injection=%d     Enable fault injection in all supported types with
+                       the specified injection rate.
+fault_type=%d          Configure the fault injection type; it should be
+                       enabled with the fault_injection option. Fault type values
+                       are shown below; single or combined types are supported.
+                       Type_Name		Type_Value
+                       FAULT_KMALLOC		0x000000001
+                       FAULT_KVMALLOC		0x000000002
+                       FAULT_PAGE_ALLOC		0x000000004
+                       FAULT_PAGE_GET		0x000000008
+                       FAULT_ALLOC_BIO		0x000000010
+                       FAULT_ALLOC_NID		0x000000020
+                       FAULT_ORPHAN		0x000000040
+                       FAULT_BLOCK		0x000000080
+                       FAULT_DIR_DEPTH		0x000000100
+                       FAULT_EVICT_INODE	0x000000200
+                       FAULT_TRUNCATE		0x000000400
+                       FAULT_IO			0x000000800
+                       FAULT_CHECKPOINT		0x000001000
+                       FAULT_DISCARD		0x000002000
 mode=%s                Control block allocation mode which supports "adaptive"
                        and "lfs". In "lfs" mode, there should be no random
                        writes towards main area.
diff --git a/Documentation/hwmon/ina2xx b/Documentation/hwmon/ina2xx
index cfd31d9..f8bf140 100644
--- a/Documentation/hwmon/ina2xx
+++ b/Documentation/hwmon/ina2xx
@@ -32,7 +32,7 @@
     Datasheet: Publicly available at the Texas Instruments website
                http://www.ti.com/
 
-Author: Lothar Felten <l-felten@ti.com>
+Author: Lothar Felten <lothar.felten@gmail.com>
 
 Description
 -----------
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index f2139f5..fdc9af2 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1100,12 +1100,6 @@
 	nopku		[X86] Disable Memory Protection Keys CPU feature found
 			in some Intel CPUs.
 
-	eagerfpu=	[X86]
-			on	enable eager fpu restore
-			off	disable eager fpu restore
-			auto	selects the default scheme, which automatically
-				enables eagerfpu restore for xsaveopt.
-
 	module.async_probe [KNL]
 			Enable asynchronous probe on this module.
 
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index e52a472..827622e 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -122,14 +122,11 @@
 
 IP Fragmentation:
 
-ipfrag_high_thresh - INTEGER
-	Maximum memory used to reassemble IP fragments. When
-	ipfrag_high_thresh bytes of memory is allocated for this purpose,
-	the fragment handler will toss packets until ipfrag_low_thresh
-	is reached. This also serves as a maximum limit to namespaces
-	different from the initial one.
+ipfrag_high_thresh - LONG INTEGER
+	Maximum memory used to reassemble IP fragments.
 
-ipfrag_low_thresh - INTEGER
+ipfrag_low_thresh - LONG INTEGER
+	(Obsolete since linux-4.17)
 	Maximum memory used to reassemble IP fragments before the kernel
 	begins to remove incomplete fragment queues to free up resources.
 	The kernel still accepts new fragments for defragmentation.
diff --git a/Makefile b/Makefile
index cdc22f3..4f75361 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 129
+SUBLEVEL = 134
 EXTRAVERSION =
 NAME = Roaring Lionus
 
diff --git a/arch/Kconfig b/arch/Kconfig
index ad306a9..0ecbd6d 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -530,6 +530,7 @@
 	bool "Use clang Link Time Optimization (LTO) (EXPERIMENTAL)"
 	depends on ARCH_SUPPORTS_LTO_CLANG
 	depends on !FTRACE_MCOUNT_RECORD || HAVE_C_RECORDMCOUNT
+	depends on !KASAN
 	select LTO
 	select THIN_ARCHIVES
 	select LD_DEAD_CODE_DATA_ELIMINATION
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 54b54da..49112f7 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -84,7 +84,7 @@
 	"1:	llock   %[orig], [%[ctr]]		\n"		\
 	"	" #asm_op " %[val], %[orig], %[i]	\n"		\
 	"	scond   %[val], [%[ctr]]		\n"		\
-	"						\n"		\
+	"	bnz     1b				\n"		\
 	: [val]	"=&r"	(val),						\
 	  [orig] "=&r" (orig)						\
 	: [ctr]	"r"	(&v->counter),					\
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index 0e8c015..3ce1213 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -213,6 +213,26 @@
 		task_thread_info(current)->thr_ptr;
 	}
 
+
+	/*
+	 * setup usermode thread pointer #1:
+	 * when child is picked by scheduler, __switch_to() uses @c_callee to
+	 * populate usermode callee regs: this works (despite being in a kernel
+	 * function) since special return path for child @ret_from_fork()
+	 * ensures those regs are not clobbered all the way to RTIE to usermode
+	 */
+	c_callee->r25 = task_thread_info(p)->thr_ptr;
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+	/*
+	 * setup usermode thread pointer #2:
+	 * however for this special use of r25 in kernel, __switch_to() sets
+	 * r25 for kernel needs and only in the final return path is usermode
+	 * r25 setup, from pt_regs->user_r25. So set that up as well
+	 */
+	c_regs->user_r25 = c_callee->r25;
+#endif
+
 	return 0;
 }
 
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index ce54a70..a1a9280 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -1770,7 +1770,7 @@
 			};
 		};
 
-		dcan1: can@481cc000 {
+		dcan1: can@4ae3c000 {
 			compatible = "ti,dra7-d_can";
 			ti,hwmods = "dcan1";
 			reg = <0x4ae3c000 0x2000>;
@@ -1780,7 +1780,7 @@
 			status = "disabled";
 		};
 
-		dcan2: can@481d0000 {
+		dcan2: can@48480000 {
 			compatible = "ti,dra7-d_can";
 			ti,hwmods = "dcan2";
 			reg = <0x48480000 0x2000>;
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
index f6768d8..aa67cdb 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
@@ -257,9 +257,9 @@
 		qcom,cpulist = <&CPU0>;
 		qcom,target-dev = <&cpubw>;
 		qcom,core-dev-table =
-				<  153600 MHZ_TO_MBPS(200, 2) >,
-				<  576000 MHZ_TO_MBPS(691, 2) >,
-				< 1497600 MHZ_TO_MBPS(1383, 2)>;
+				<  153600 MHZ_TO_MBPS( 200, 4) >,
+				<  576000 MHZ_TO_MBPS( 691, 4) >,
+				< 1497600 MHZ_TO_MBPS(1383, 4)>;
 	};
 
 	clock_gcc: qcom,gcc@100000 {
diff --git a/arch/arm/boot/dts/sama5d3_emac.dtsi b/arch/arm/boot/dts/sama5d3_emac.dtsi
index 7cb235e..6e9e1c2 100644
--- a/arch/arm/boot/dts/sama5d3_emac.dtsi
+++ b/arch/arm/boot/dts/sama5d3_emac.dtsi
@@ -41,7 +41,7 @@
 			};
 
 			macb1: ethernet@f802c000 {
-				compatible = "cdns,at91sam9260-macb", "cdns,macb";
+				compatible = "atmel,sama5d3-macb", "cdns,at91sam9260-macb", "cdns,macb";
 				reg = <0xf802c000 0x100>;
 				interrupts = <35 IRQ_TYPE_LEVEL_HIGH 3>;
 				pinctrl-names = "default";
diff --git a/arch/arm/configs/msm8909w-perf_defconfig b/arch/arm/configs/msm8909w-perf_defconfig
index 27878b6..8aa9311d 100644
--- a/arch/arm/configs/msm8909w-perf_defconfig
+++ b/arch/arm/configs/msm8909w-perf_defconfig
@@ -337,6 +337,7 @@
 CONFIG_FB_MSM_MDSS_WRITEBACK=y
 CONFIG_FB_MSM_MDSS_SPI_PANEL=y
 CONFIG_FB_MSM_MDSS_MDP3=y
+CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
 CONFIG_BACKLIGHT_CLASS_DEVICE=y
 CONFIG_SOUND=y
@@ -458,6 +459,8 @@
 CONFIG_MSM_TZ_LOG=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_SECURITY=y
+CONFIG_F2FS_FS=y
+CONFIG_F2FS_FS_SECURITY=y
 CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V2=y
diff --git a/arch/arm/configs/msm8909w_defconfig b/arch/arm/configs/msm8909w_defconfig
index 28e102e..99c1909 100644
--- a/arch/arm/configs/msm8909w_defconfig
+++ b/arch/arm/configs/msm8909w_defconfig
@@ -329,6 +329,7 @@
 CONFIG_FB_MSM_MDSS_WRITEBACK=y
 CONFIG_FB_MSM_MDSS_SPI_PANEL=y
 CONFIG_FB_MSM_MDSS_MDP3=y
+CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
 # CONFIG_LCD_CLASS_DEVICE is not set
 CONFIG_BACKLIGHT_CLASS_DEVICE=y
@@ -452,6 +453,8 @@
 CONFIG_MSM_TZ_LOG=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_SECURITY=y
+CONFIG_F2FS_FS=y
+CONFIG_F2FS_FS_SECURITY=y
 CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V2=y
diff --git a/arch/arm/configs/msm8937-perf_defconfig b/arch/arm/configs/msm8937-perf_defconfig
index c47203f..73e3c87 100644
--- a/arch/arm/configs/msm8937-perf_defconfig
+++ b/arch/arm/configs/msm8937-perf_defconfig
@@ -487,6 +487,7 @@
 CONFIG_USB_CONFIGFS=y
 CONFIG_USB_CONFIGFS_SERIAL=y
 CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_QCRNDIS=y
 CONFIG_USB_CONFIGFS_RNDIS=y
 CONFIG_USB_CONFIGFS_RMNET_BAM=y
 CONFIG_USB_CONFIGFS_MASS_STORAGE=y
diff --git a/arch/arm/configs/msm8937_defconfig b/arch/arm/configs/msm8937_defconfig
index e7017ec..a74358c 100644
--- a/arch/arm/configs/msm8937_defconfig
+++ b/arch/arm/configs/msm8937_defconfig
@@ -496,6 +496,7 @@
 CONFIG_USB_CONFIGFS=y
 CONFIG_USB_CONFIGFS_SERIAL=y
 CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_QCRNDIS=y
 CONFIG_USB_CONFIGFS_RNDIS=y
 CONFIG_USB_CONFIGFS_RMNET_BAM=y
 CONFIG_USB_CONFIGFS_MASS_STORAGE=y
diff --git a/arch/arm/configs/sdxpoorwills-auto-perf_defconfig b/arch/arm/configs/sdxpoorwills-auto-perf_defconfig
index 1fe7178..8ff8571 100644
--- a/arch/arm/configs/sdxpoorwills-auto-perf_defconfig
+++ b/arch/arm/configs/sdxpoorwills-auto-perf_defconfig
@@ -228,6 +228,11 @@
 CONFIG_INPUT_QPNP_POWER_ON=y
 CONFIG_INPUT_UINPUT=y
 CONFIG_INPUT_GPIO=m
+CONFIG_BOSCH_DRIVER_LOG_FUNC=y
+CONFIG_SENSORS_SMI_ACC2X2=y
+CONFIG_SENSORS_SMI_ACC2X2_ENABLE_INT2=y
+CONFIG_SENSORS_SMI_GYRO=y
+CONFIG_SENSORS_SMI_GYRO_FIFO=y
 CONFIG_SERIO_LIBPS2=y
 # CONFIG_LEGACY_PTYS is not set
 CONFIG_SERIAL_MSM=y
@@ -397,6 +402,11 @@
 CONFIG_QCOM_DEVFREQ_DEVBW=y
 CONFIG_EXTCON_QCOM_SPMI_MISC=y
 CONFIG_IIO=y
+CONFIG_INV_MPU_IIO_IAM20680=y
+CONFIG_INV_MPU_IIO_I2C=y
+CONFIG_INV_MPU_IIO_SPI=y
+CONFIG_INV_TESTING=y
+CONFIG_IIO_ST_ASM330LHH=y
 CONFIG_PWM=y
 CONFIG_PWM_QPNP=y
 CONFIG_QCOM_SHOW_RESUME_IRQ=y
diff --git a/arch/arm/configs/sdxpoorwills-auto_defconfig b/arch/arm/configs/sdxpoorwills-auto_defconfig
index 9a62d53..7e6bc34 100644
--- a/arch/arm/configs/sdxpoorwills-auto_defconfig
+++ b/arch/arm/configs/sdxpoorwills-auto_defconfig
@@ -222,6 +222,11 @@
 CONFIG_INPUT_QPNP_POWER_ON=y
 CONFIG_INPUT_UINPUT=y
 CONFIG_INPUT_GPIO=m
+CONFIG_BOSCH_DRIVER_LOG_FUNC=y
+CONFIG_SENSORS_SMI_ACC2X2=y
+CONFIG_SENSORS_SMI_ACC2X2_ENABLE_INT2=y
+CONFIG_SENSORS_SMI_GYRO=y
+CONFIG_SENSORS_SMI_GYRO_FIFO=y
 CONFIG_SERIO_LIBPS2=y
 # CONFIG_LEGACY_PTYS is not set
 CONFIG_SERIAL_MSM=y
@@ -402,6 +407,11 @@
 CONFIG_QCOM_DEVFREQ_DEVBW=y
 CONFIG_EXTCON_QCOM_SPMI_MISC=y
 CONFIG_IIO=y
+CONFIG_INV_MPU_IIO_IAM20680=y
+CONFIG_INV_MPU_IIO_I2C=y
+CONFIG_INV_MPU_IIO_SPI=y
+CONFIG_INV_TESTING=y
+CONFIG_IIO_ST_ASM330LHH=y
 CONFIG_PWM=y
 CONFIG_PWM_QPNP=y
 CONFIG_QCOM_SHOW_RESUME_IRQ=y
diff --git a/arch/arm/mach-mvebu/pmsu.c b/arch/arm/mach-mvebu/pmsu.c
index f39bd51..faaf7c3 100644
--- a/arch/arm/mach-mvebu/pmsu.c
+++ b/arch/arm/mach-mvebu/pmsu.c
@@ -116,8 +116,8 @@
 		PMSU_BOOT_ADDR_REDIRECT_OFFSET(hw_cpu));
 }
 
-extern unsigned char mvebu_boot_wa_start;
-extern unsigned char mvebu_boot_wa_end;
+extern unsigned char mvebu_boot_wa_start[];
+extern unsigned char mvebu_boot_wa_end[];
 
 /*
  * This function sets up the boot address workaround needed for SMP
@@ -130,7 +130,7 @@
 			     phys_addr_t resume_addr_reg)
 {
 	void __iomem *sram_virt_base;
-	u32 code_len = &mvebu_boot_wa_end - &mvebu_boot_wa_start;
+	u32 code_len = mvebu_boot_wa_end - mvebu_boot_wa_start;
 
 	mvebu_mbus_del_window(BOOTROM_BASE, BOOTROM_SIZE);
 	mvebu_mbus_add_window_by_id(crypto_eng_target, crypto_eng_attribute,
diff --git a/arch/arm/mach-omap2/omap_hwmod_reset.c b/arch/arm/mach-omap2/omap_hwmod_reset.c
index b68f9c0..d5ddba0 100644
--- a/arch/arm/mach-omap2/omap_hwmod_reset.c
+++ b/arch/arm/mach-omap2/omap_hwmod_reset.c
@@ -92,11 +92,13 @@
  */
 void omap_hwmod_rtc_unlock(struct omap_hwmod *oh)
 {
-	local_irq_disable();
+	unsigned long flags;
+
+	local_irq_save(flags);
 	omap_rtc_wait_not_busy(oh);
 	omap_hwmod_write(OMAP_RTC_KICK0_VALUE, oh, OMAP_RTC_KICK0_REG);
 	omap_hwmod_write(OMAP_RTC_KICK1_VALUE, oh, OMAP_RTC_KICK1_REG);
-	local_irq_enable();
+	local_irq_restore(flags);
 }
 
 /**
@@ -110,9 +112,11 @@
  */
 void omap_hwmod_rtc_lock(struct omap_hwmod *oh)
 {
-	local_irq_disable();
+	unsigned long flags;
+
+	local_irq_save(flags);
 	omap_rtc_wait_not_busy(oh);
 	omap_hwmod_write(0x0, oh, OMAP_RTC_KICK0_REG);
 	omap_hwmod_write(0x0, oh, OMAP_RTC_KICK1_REG);
-	local_irq_enable();
+	local_irq_restore(flags);
 }
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 96ec05d..2d53e26 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -802,6 +802,13 @@
 	help
 	  Say Y if you want to run Linux in a Virtual Machine on Xen on ARM64.
 
+config OKL4_GUEST
+	bool "OKL4 Hypervisor guest support"
+	depends on ARM64 && OF
+	default n
+	help
+	  Say Y if you want to run Linux as a guest of the OKL4 hypervisor.
+
 config FORCE_MAX_ZONEORDER
 	int
 	default "14" if (ARM64_64K_PAGES && TRANSPARENT_HUGEPAGE)
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 2b265a7..826f47b 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -81,7 +81,7 @@
 KBUILD_CFLAGS_MODULE	+= -mcmodel=large
 ifeq ($(CONFIG_LTO_CLANG), y)
 # Code model is not stored in LLVM IR, so we need to pass it also to LLVMgold
-LDFLAGS		+= -plugin-opt=-code-model=large
+KBUILD_LDFLAGS_MODULE	+= -plugin-opt=-code-model=large
 endif
 endif
 
diff --git a/arch/arm64/boot/dts/qcom/msm8909w-pm660-regulator.dtsi b/arch/arm64/boot/dts/qcom/msm8909w-pm660-regulator.dtsi
index ecf28c5..47a925e 100644
--- a/arch/arm64/boot/dts/qcom/msm8909w-pm660-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8909w-pm660-regulator.dtsi
@@ -266,9 +266,9 @@
 	rpm-regulator-ldoa15 {
 		status = "okay";
 		pm660_l15: regulator-l15 {
-			regulator-min-microvolt = <3300000>;
-			regulator-max-microvolt = <3300000>;
-			qcom,init-voltage = <3300000>;
+			regulator-min-microvolt = <3000000>;
+			regulator-max-microvolt = <3000000>;
+			qcom,init-voltage = <3000000>;
 			status = "okay";
 		};
 	};
diff --git a/arch/arm64/boot/dts/qcom/msm8917-qrd.dtsi b/arch/arm64/boot/dts/qcom/msm8917-qrd.dtsi
index 6a4c10e..5c63ed3 100644
--- a/arch/arm64/boot/dts/qcom/msm8917-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8917-qrd.dtsi
@@ -13,7 +13,6 @@
 #include <dt-bindings/clock/msm-clocks-8952.h>
 #include "msm8917-camera-sensor-qrd.dtsi"
 #include "msm8937-mdss-panels.dtsi"
-#include "msm8917-pmi8937.dtsi"
 
 &blsp1_uart2 {
 	status = "ok";
diff --git a/arch/arm64/boot/dts/qcom/msm8937-camera.dtsi b/arch/arm64/boot/dts/qcom/msm8937-camera.dtsi
index 831ce61..b5f467e 100644
--- a/arch/arm64/boot/dts/qcom/msm8937-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8937-camera.dtsi
@@ -24,7 +24,7 @@
 	qcom,csiphy@1b34000 {
 		status = "ok";
 		cell-index = <0>;
-		compatible = "qcom,csiphy-v10.00", "qcom,csiphy";
+		compatible = "qcom,csiphy-v3.4.2", "qcom,csiphy";
 		reg = <0x1b34000 0x1000>,
 			<0x1b00030 0x4>;
 		reg-names = "csiphy", "csiphy_clk_mux";
@@ -45,7 +45,7 @@
 	qcom,csiphy@1b35000 {
 		status = "ok";
 		cell-index = <1>;
-		compatible = "qcom,csiphy-v10.00", "qcom,csiphy";
+		compatible = "qcom,csiphy-v3.4.2", "qcom,csiphy";
 		reg = <0x1b35000 0x1000>,
 			<0x1b00038 0x4>;
 		reg-names = "csiphy", "csiphy_clk_mux";
diff --git a/arch/arm64/boot/dts/qcom/msm8940.dtsi b/arch/arm64/boot/dts/qcom/msm8940.dtsi
index 64fbaf9..f1a3abe 100644
--- a/arch/arm64/boot/dts/qcom/msm8940.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8940.dtsi
@@ -110,6 +110,101 @@
 		qcom,bw-dwnstep = <4000>;
 		qcom,max-vote = <4000>;
 	};
+
+	/delete-node/ funnel@6120000;
+	funnel_right: funnel@6120000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
+		reg = <0x6120000 0x1000>;
+		reg-names = "funnel-base";
+
+		coresight-name = "coresight-funnel-right";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				funnel_right_out_funnel_in0: endpoint {
+					remote-endpoint =
+						<&funnel_in0_in_funnel_right>;
+				};
+			};
+
+			port@1 {
+				reg = <1>;
+				funnel_right_in_modem_etm1: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&modem_etm1_out_funnel_right>;
+				};
+			};
+
+			port@2 {
+				reg = <2>;
+				funnel_right_in_modem_etm0: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&modem_etm0_out_funnel_right>;
+				};
+			};
+
+			port@3 {
+				reg = <3>;
+				funnel_right_in_funnel_apss: endpoint {
+					slave-mode;
+					remote-endpoint =
+					       <&funnel_apss_out_funnel_right>;
+				};
+			};
+		};
+	};
+
+	/delete-node/ cti@6124000;
+	cti_modem_cpu0: cti@6128000{
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x6128000 0x1000>;
+		reg-names = "cti-base";
+		coresight-name = "coresight-cti-modem-cpu0";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+
+	cti_modem_cpu1: cti@6124000{
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x6124000 0x1000>;
+		reg-names = "cti-base";
+		coresight-name = "coresight-cti-modem-cpu1";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	modem_etm1 {
+		compatible = "qcom,coresight-remote-etm";
+		coresight-name = "coresight-modem-etm1";
+		qcom,inst-id = <11>;
+
+		port {
+			modem_etm1_out_funnel_right: endpoint {
+				remote-endpoint = <&funnel_right_in_modem_etm1>;
+			};
+		};
+	};
+
 };
 
 &clock_gcc {
diff --git a/arch/arm64/boot/dts/qcom/msm8953.dtsi b/arch/arm64/boot/dts/qcom/msm8953.dtsi
index 24701bf..b1cfed4 100644
--- a/arch/arm64/boot/dts/qcom/msm8953.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953.dtsi
@@ -154,6 +154,15 @@
 			reusable;
 			size = <0x400000>;
 		};
+
+		/* global autoconfigured region for contiguous allocations */
+		linux,cma {
+			compatible = "shared-dma-pool";
+			reusable;
+			alignment = <0 0x400000>;
+			size = <0 0x1400000>;
+			linux,cma-default;
+		};
 	};
 
 	aliases {
diff --git a/arch/arm64/boot/dts/qcom/pmi632.dtsi b/arch/arm64/boot/dts/qcom/pmi632.dtsi
index a6f29bf..da15b33 100644
--- a/arch/arm64/boot/dts/qcom/pmi632.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi632.dtsi
@@ -44,6 +44,7 @@
 			interrupt-names = "eoc-int-en-set";
 			qcom,adc-vdd-reference = <1875>;
 			qcom,adc-full-scale-code = <0x70e4>;
+			qcom,pmic-revid = <&pmi632_revid>;
 
 			chan@0 {
 				label = "ref_gnd";
diff --git a/arch/arm64/boot/dts/qcom/qcs605-lc-ipcamera.dtsi b/arch/arm64/boot/dts/qcom/qcs605-lc-ipcamera.dtsi
index 0b36ffe..c211cec 100644
--- a/arch/arm64/boot/dts/qcom/qcs605-lc-ipcamera.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs605-lc-ipcamera.dtsi
@@ -206,6 +206,13 @@
 	qcom,wsa-max-devs = <1>;
 	qcom,wsa-devs = <&wsa881x_0211>, <&wsa881x_0213>;
 	qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrLeft";
+	qcom,linein-det-swh = <1>;
+	qcom,lineout-det-swh = <1>;
+	qcom,linein-det-gpio = <&tlmm 124 0>;
+	qcom,lineout-det-gpio = <&tlmm 125 0>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&jack_det_linein_default
+		     &jack_det_lineout_default>;
 };
 
 &soc {
diff --git a/arch/arm64/boot/dts/qcom/qcs605-lc-sde-display.dtsi b/arch/arm64/boot/dts/qcom/qcs605-lc-sde-display.dtsi
index 99bf1e5..382fc1d5 100644
--- a/arch/arm64/boot/dts/qcom/qcs605-lc-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs605-lc-sde-display.dtsi
@@ -58,7 +58,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		ports {
 			#address-cells = <1>;
@@ -81,7 +81,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -112,11 +112,10 @@
 &dsi_dual_nt35597_truly_video {
 	qcom,mdss-dsi-t-clk-post = <0x0D>;
 	qcom,mdss-dsi-t-clk-pre = <0x2D>;
-	qcom,mdss-dsi-min-refresh-rate = <53>;
-	qcom,mdss-dsi-max-refresh-rate = <60>;
 	qcom,mdss-dsi-pan-enable-dynamic-fps;
 	qcom,mdss-dsi-pan-fps-update =
 		"dfps_immediate_porch_mode_vfp";
+	qcom,dsi-supported-dfps-list = <53 55 60>;
 	qcom,esd-check-enabled;
 	qcom,mdss-dsi-panel-status-check-mode = "reg_read";
 	qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
diff --git a/arch/arm64/boot/dts/qcom/qcs605.dtsi b/arch/arm64/boot/dts/qcom/qcs605.dtsi
index cb46865..3c6c7a8 100644
--- a/arch/arm64/boot/dts/qcom/qcs605.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs605.dtsi
@@ -18,40 +18,52 @@
 	qcom,msm-id = <347 0x0>;
 };
 
+&removed_region {
+	reg = <0 0x85fc0000 0 0x1540000>;
+};
+
+&pil_camera_mem {
+	reg = <0 0x8b800000 0 0x500000>;
+};
+
 &pil_modem_mem {
-	reg = <0 0x8b000000 0 0x3100000>;
+	reg = <0 0x8bd00000 0 0x3100000>;
 };
 
 &pil_video_mem {
-	reg = <0 0x8e100000 0 0x500000>;
+	reg = <0 0x8ee00000 0 0x500000>;
 };
 
 &wlan_msa_mem {
-	reg = <0 0x8e600000 0 0x100000>;
+	reg = <0 0x8f300000 0 0x100000>;
 };
 
 &pil_cdsp_mem {
-	reg = <0 0x8e700000 0 0x800000>;
+	reg = <0 0x8f400000 0 0x800000>;
 };
 
 &pil_mba_mem {
-	reg = <0 0x8ef00000 0 0x200000>;
+	reg = <0 0x8fc00000 0 0x200000>;
 };
 
 &pil_adsp_mem {
-	reg = <0 0x8f100000 0 0x1e00000>;
+	reg = <0 0x8fe00000 0 0x1e00000>;
 };
 
 &pil_ipa_fw_mem {
-	reg = <0 0x90f00000 0 0x10000>;
+	reg = <0 0x91c00000 0 0x10000>;
 };
 
 &pil_ipa_gsi_mem {
-	reg = <0 0x90f10000 0 0x5000>;
+	reg = <0 0x91c10000 0 0x5000>;
 };
 
 &pil_gpu_mem {
-	reg = <0 0x90f15000 0 0x2000>;
+	reg = <0 0x91c15000 0 0x2000>;
+};
+
+&qseecom_mem {
+	reg = <0 0x9e800000 0 0x1000000>;
 };
 
 &adsp_mem {
@@ -64,6 +76,7 @@
 
 &qcom_seecom {
 	reg = <0x86d00000 0x800000>;
+	/delete-property/ qcom,appsbl-qseecom-support;
 };
 
 &sp_mem {
diff --git a/arch/arm64/boot/dts/qcom/qm215-pm8916.dtsi b/arch/arm64/boot/dts/qcom/qm215-pm8916.dtsi
index 46e4bf7..7a66c34 100644
--- a/arch/arm64/boot/dts/qcom/qm215-pm8916.dtsi
+++ b/arch/arm64/boot/dts/qcom/qm215-pm8916.dtsi
@@ -389,7 +389,7 @@
 	thermal-zones {
 		xo-therm-buf-adc {
 			polling-delay-passive = <0>;
-			polling-delay = <0>;
+			polling-delay = <5000>;
 			thermal-sensors = <&pm8916_vadc 0x3c>;
 			thermal-governor = "user_space";
 
@@ -404,7 +404,7 @@
 
 		xo-therm-adc {
 			polling-delay-passive = <0>;
-			polling-delay = <0>;
+			polling-delay = <5000>;
 			thermal-sensors = <&pm8916_vadc 0x32>;
 			thermal-governor = "user_space";
 
@@ -419,7 +419,7 @@
 
 		pa-therm0-adc {
 			polling-delay-passive = <0>;
-			polling-delay = <0>;
+			polling-delay = <5000>;
 			thermal-sensors = <&pm8916_vadc 0x36>;
 			thermal-governor = "user_space";
 
@@ -434,7 +434,7 @@
 
 		skin-therm-adc {
 			polling-delay-passive = <0>;
-			polling-delay = <0>;
+			polling-delay = <5000>;
 			thermal-sensors = <&pm8916_vadc 0x11>;
 			thermal-governor = "user_space";
 
diff --git a/arch/arm64/boot/dts/qcom/qm215-qrd.dtsi b/arch/arm64/boot/dts/qcom/qm215-qrd.dtsi
index 76540a1..c37df9c 100644
--- a/arch/arm64/boot/dts/qcom/qm215-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/qm215-qrd.dtsi
@@ -20,6 +20,7 @@
 	qcom,chgr-led-support;
 	qcom,vddmax-mv = <4400>;
 	qcom,vddsafe-mv = <4400>;
+	qcom,batt-hot-percentage = <35>;
 };
 
 &pm8916_bms{
@@ -120,6 +121,39 @@
 	/delete-node/ qcom,panel-supply-entry@3;
 };
 
+&pm8916_gpios {
+	nfc_clk {
+		nfc_clk_default: nfc_clk_default {
+			pins = "gpio2";
+			function = "normal";
+			input-enable;
+			power-source = <1>;
+		};
+	};
+};
+
+&i2c_5 { /* BLSP2 QUP1 (NFC) */
+	status = "ok";
+	nq@28 {
+		compatible = "qcom,nq-nci";
+		reg = <0x28>;
+		qcom,nq-irq = <&tlmm 17 0x00>;
+		qcom,nq-ven = <&tlmm 16 0x00>;
+		qcom,nq-firm = <&tlmm 130 0x00>;
+		qcom,nq-clkreq = <&pm8916_gpios 2 0x00>;
+		interrupt-parent = <&tlmm>;
+		qcom,clk-src = "BBCLK2";
+		interrupts = <17 0>;
+		interrupt-names = "nfc_irq";
+		pinctrl-names = "nfc_active", "nfc_suspend";
+		pinctrl-0 = <&nfc_int_active &nfc_disable_active
+						&nfc_clk_default>;
+		pinctrl-1 = <&nfc_int_suspend &nfc_disable_suspend>;
+		clocks = <&clock_gcc clk_bb_clk2_pin>;
+		clock-names = "ref_clk";
+	};
+};
+
 &mdss_dsi {
 	hw-config = "single_dsi";
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm439.dtsi b/arch/arm64/boot/dts/qcom/sdm439.dtsi
index a59b457..ddf9ee6 100644
--- a/arch/arm64/boot/dts/qcom/sdm439.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm439.dtsi
@@ -35,6 +35,12 @@
 		qcom,csi-vdd-voltage = <800000>;
 		qcom,mipi-csi-vdd-supply = <&pm8953_l23>;
 	};
+	qcom,csiphy@1b34000 {
+		compatible = "qcom,csiphy-v10.00", "qcom,csiphy";
+	};
+	qcom,csiphy@1b35000 {
+		compatible = "qcom,csiphy-v10.00", "qcom,csiphy";
+	};
 
 	/delete-node/ qcom,msm-cpufreq;
 	msm_cpufreq: qcom,msm-cpufreq {
@@ -675,6 +681,10 @@
 	};
 };
 
+&sdhc_1 {
+	qcom,ddr-config = <0x00040868>;
+};
+
 &mdss_mdp {
 	qcom,vbif-settings = <0xd0 0x20>;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm670-audio.dtsi b/arch/arm64/boot/dts/qcom/sdm670-audio.dtsi
index 73c7be2..15874ff 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-audio.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-audio.dtsi
@@ -43,6 +43,7 @@
 		qcom,mi2s-audio-intf;
 		qcom,auxpcm-audio-intf;
 		qcom,ext-disp-audio-rx;
+		qcom,afe-rxtx-lb;
 		asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>,
 			<&loopback>, <&compress>, <&hostless>,
 			<&afe>, <&lsm>, <&routing>, <&cpe>, <&compr>,
@@ -73,7 +74,8 @@
 			<&dai_sec_tdm_rx_0>, <&dai_sec_tdm_tx_0>,
 			<&dai_tert_tdm_rx_0>, <&dai_tert_tdm_tx_0>,
 			<&dai_quat_tdm_rx_0>, <&dai_quat_tdm_tx_0>,
-			<&dai_quin_tdm_rx_0>, <&dai_quin_tdm_tx_0>;
+			<&dai_quin_tdm_rx_0>, <&dai_quin_tdm_tx_0>,
+			<&afe_loopback_tx>;
 		asoc-cpu-names = "msm-dai-q6-dp.24608",
 			"msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1",
 			"msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3",
@@ -99,7 +101,8 @@
 			"msm-dai-q6-tdm.36880", "msm-dai-q6-tdm.36881",
 			"msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36897",
 			"msm-dai-q6-tdm.36912", "msm-dai-q6-tdm.36913",
-			"msm-dai-q6-tdm.36928", "msm-dai-q6-tdm.36929";
+			"msm-dai-q6-tdm.36928", "msm-dai-q6-tdm.36929",
+			"msm-dai-q6-dev.24577";
 	};
 
 	tasha_snd: sound-tasha {
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
index 59fb78e..aa6b890 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
@@ -1732,6 +1732,36 @@
 			};
 		};
 
+		gpio_jack_det_line_in {
+			jack_det_linein_default: jack_det_linein_default {
+				mux {
+					pins = "gpio124";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio124";
+					bias-pull-up; /* pull up */
+					input-enable;
+				};
+			};
+		};
+
+		gpio_jack_det_line_out {
+			jack_det_lineout_default: jack_det_lineout_default {
+				mux {
+					pins = "gpio125";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio125";
+					bias-pull-up; /* pull up */
+					input-enable;
+				};
+			};
+		};
+
 		flash_led3_front {
 			flash_led3_front_en: flash_led3_front_en {
 				mux {
diff --git a/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi
index 92d4317..2887f38 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi
@@ -137,9 +137,16 @@
 
 		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
+
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
-			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+			<&mdss_dsi0_pll PCLK_MUX_0_CLK>,
+			<&mdss_dsi0_pll BYTECLK_SRC_0_CLK>,
+			<&mdss_dsi0_pll PCLK_SRC_0_CLK>,
+			<&mdss_dsi0_pll SHADOW_BYTECLK_SRC_0_CLK>,
+			<&mdss_dsi0_pll SHADOW_PCLK_SRC_0_CLK>;
+		clock-names = "mux_byte_clk", "mux_pixel_clk",
+				"src_byte_clk", "src_pixel_clk",
+				"shadow_byte_clk", "shadow_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -162,7 +169,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -186,7 +193,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy1>;
 		clocks = <&mdss_dsi1_pll BYTECLK_MUX_1_CLK>,
 			<&mdss_dsi1_pll PCLK_MUX_1_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -209,8 +216,14 @@
 		qcom,dsi-ctrl = <&mdss_dsi1>;
 		qcom,dsi-phy = <&mdss_dsi_phy1>;
 		clocks = <&mdss_dsi1_pll BYTECLK_MUX_1_CLK>,
-			<&mdss_dsi1_pll PCLK_MUX_1_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+			<&mdss_dsi1_pll PCLK_MUX_1_CLK>,
+			<&mdss_dsi1_pll BYTECLK_SRC_1_CLK>,
+			<&mdss_dsi1_pll PCLK_SRC_1_CLK>,
+			<&mdss_dsi1_pll SHADOW_BYTECLK_SRC_1_CLK>,
+			<&mdss_dsi1_pll SHADOW_PCLK_SRC_1_CLK>;
+		clock-names = "mux_byte_clk", "mux_pixel_clk",
+				"src_byte_clk", "src_pixel_clk",
+				"shadow_byte_clk", "shadow_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -234,7 +247,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -252,7 +265,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -270,7 +283,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -288,7 +301,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -306,7 +319,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -324,7 +337,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -342,7 +355,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 		       <&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -365,7 +378,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 		       <&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -388,7 +401,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 		       <&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -410,7 +423,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 		       <&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -432,7 +445,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 		       <&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -455,7 +468,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -478,7 +491,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -501,7 +514,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -524,7 +537,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 		       <&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		ports {
 			#address-cells = <1>;
@@ -621,11 +634,10 @@
 &dsi_dual_nt35597_truly_video {
 	qcom,mdss-dsi-t-clk-post = <0x0D>;
 	qcom,mdss-dsi-t-clk-pre = <0x2D>;
-	qcom,mdss-dsi-min-refresh-rate = <53>;
-	qcom,mdss-dsi-max-refresh-rate = <60>;
 	qcom,mdss-dsi-pan-enable-dynamic-fps;
 	qcom,mdss-dsi-pan-fps-update =
 		"dfps_immediate_porch_mode_vfp";
+	qcom,dsi-supported-dfps-list = <53 55 60>;
 	qcom,esd-check-enabled;
 	qcom,mdss-dsi-panel-status-check-mode = "reg_read";
 	qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
@@ -633,6 +645,9 @@
 	qcom,mdss-dsi-panel-status-value = <0x9c>;
 	qcom,mdss-dsi-panel-on-check-value = <0x9c>;
 	qcom,mdss-dsi-panel-status-read-length = <1>;
+	qcom,dsi-dyn-clk-enable;
+	qcom,dsi-dyn-clk-list =
+		<804948480 798240576 801594528 808302432 811656384>;
 	qcom,mdss-dsi-display-timings {
 		timing@0{
 			qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07
@@ -694,11 +709,10 @@
 &dsi_nt35597_truly_dsc_video {
 	qcom,mdss-dsi-t-clk-post = <0x0b>;
 	qcom,mdss-dsi-t-clk-pre = <0x23>;
-	qcom,mdss-dsi-min-refresh-rate = <53>;
-	qcom,mdss-dsi-max-refresh-rate = <60>;
 	qcom,mdss-dsi-pan-enable-dynamic-fps;
 	qcom,mdss-dsi-pan-fps-update =
 		"dfps_immediate_porch_mode_vfp";
+	qcom,dsi-supported-dfps-list = <53 55 60>;
 	qcom,esd-check-enabled;
 	qcom,mdss-dsi-panel-status-check-mode = "reg_read";
 	qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
@@ -935,11 +949,10 @@
 &dsi_nt35695b_truly_fhd_video {
 	qcom,mdss-dsi-t-clk-post = <0x07>;
 	qcom,mdss-dsi-t-clk-pre = <0x1c>;
-	qcom,mdss-dsi-min-refresh-rate = <48>;
-	qcom,mdss-dsi-max-refresh-rate = <60>;
 	qcom,mdss-dsi-pan-enable-dynamic-fps;
 	qcom,mdss-dsi-pan-fps-update =
 		"dfps_immediate_porch_mode_vfp";
+	qcom,dsi-supported-dfps-list = <48 53 55 60>;
 	qcom,mdss-dsi-display-timings {
 		timing@0 {
 			qcom,mdss-dsi-panel-phy-timings = [00 1c 05 06 0b 0c
@@ -990,11 +1003,10 @@
 &dsi_hx8399_truly_cmd {
 	qcom,mdss-dsi-t-clk-post = <0x0E>;
 	qcom,mdss-dsi-t-clk-pre = <0x30>;
-	qcom,mdss-dsi-min-refresh-rate = <55>;
-	qcom,mdss-dsi-max-refresh-rate = <60>;
 	qcom,mdss-dsi-pan-enable-dynamic-fps;
 	qcom,mdss-dsi-pan-fps-update =
 		"dfps_immediate_porch_mode_vfp";
+	qcom,dsi-supported-dfps-list = <55 60>;
 	qcom,esd-check-enabled;
 	qcom,mdss-dsi-panel-status-check-mode = "reg_read";
 	qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
diff --git a/arch/arm64/boot/dts/qcom/sdm670-sde-pll.dtsi b/arch/arm64/boot/dts/qcom/sdm670-sde-pll.dtsi
index 72e3f5f..326f4c0 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-sde-pll.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-sde-pll.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -18,11 +18,14 @@
 		#clock-cells = <1>;
 		reg = <0xae94a00 0x1e0>,
 		      <0xae94400 0x800>,
-		      <0xaf03000 0x8>;
-		reg-names = "pll_base", "phy_base", "gdsc_base";
+		      <0xaf03000 0x8>,
+		      <0xae94200 0x100>;
+		reg-names = "pll_base", "phy_base", "gdsc_base",
+				"dynamic_pll_base";
 		clocks = <&clock_dispcc DISP_CC_MDSS_AHB_CLK>;
 		clock-names = "iface_clk";
 		clock-rate = <0>;
+		memory-region = <&dfps_data_memory>;
 		gdsc-supply = <&mdss_core_gdsc>;
 		qcom,platform-supply-entries {
 			#address-cells = <1>;
@@ -45,8 +48,10 @@
 		#clock-cells = <1>;
 		reg = <0xae96a00 0x1e0>,
 		      <0xae96400 0x800>,
-		      <0xaf03000 0x8>;
-		reg-names = "pll_base", "phy_base", "gdsc_base";
+		      <0xaf03000 0x8>,
+		      <0xae96200 0x100>;
+		reg-names = "pll_base", "phy_base", "gdsc_base",
+				"dynamic_pll_base";
 		clocks = <&clock_dispcc DISP_CC_MDSS_AHB_CLK>;
 		clock-names = "iface_clk";
 		clock-rate = <0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm670-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm670-sde.dtsi
index fb717f3..9a567a3 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-sde.dtsi
@@ -485,8 +485,9 @@
 		compatible = "qcom,dsi-phy-v3.0";
 		label = "dsi-phy-0";
 		cell-index = <0>;
-		reg = <0xae94400 0x7c0>;
-		reg-names = "dsi_phy";
+		reg = <0xae94400 0x7c0>,
+			<0xae94200 0x100>;
+		reg-names = "dsi_phy", "dyn_refresh_base";
 		gdsc-supply = <&mdss_core_gdsc>;
 		vdda-0p9-supply = <&pm660l_l1>;
 		qcom,platform-strength-ctrl = [55 03
@@ -518,8 +519,9 @@
 		compatible = "qcom,dsi-phy-v3.0";
 		label = "dsi-phy-1";
 		cell-index = <1>;
-		reg = <0xae96400 0x7c0>;
-		reg-names = "dsi_phy";
+		reg = <0xae96400 0x7c0>,
+			<0xae96200 0x100>;
+		reg-names = "dsi_phy", "dyn_refresh_base";
 		gdsc-supply = <&mdss_core_gdsc>;
 		vdda-0p9-supply = <&pm660l_l1>;
 		qcom,platform-strength-ctrl = [55 03
diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi
index f1109e2..af44079 100644
--- a/arch/arm64/boot/dts/qcom/sdm670.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi
@@ -596,10 +596,15 @@
 		};
 
 		cont_splash_memory: cont_splash_region@9c000000 {
-			reg = <0x0 0x9c000000 0x0 0x02400000>;
+			reg = <0x0 0x9c000000 0x0 0x2300000>;
 			label = "cont_splash_region";
 		};
 
+		dfps_data_memory: dfps_data_region@9e300000 {
+			reg = <0x0 0x9e300000 0x0 0x0100000>;
+			label = "dfps_data_region";
+		};
+
 		dump_mem: mem_dump_region {
 			compatible = "shared-dma-pool";
 			reusable;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-qvr-dvt.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-qvr-dvt.dtsi
index b671d0e..9409a4c 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-qvr-dvt.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-qvr-dvt.dtsi
@@ -69,7 +69,7 @@
 		sensor-position-yaw = <0>;
 		cam_vio-supply = <&pm8998_lvs1>;
 		cam_vana-supply = <&pmi8998_bob>;
-		cam_vdig-supply = <&pm8998_s3>;
+		cam_vdig-supply = <&camera_eyetracking_force>;
 		cam_clk-supply = <&titan_top_gdsc>;
 		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
 					"cam_clk";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-qvr.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-qvr.dtsi
index 6052074..f19007f 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-qvr.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-qvr.dtsi
@@ -591,7 +591,7 @@
 		sensor-position-yaw = <0>;
 		cam_vio-supply = <&pm8998_lvs1>;
 		cam_vana-supply = <&pmi8998_bob>;
-		cam_vdig-supply = <&pm8998_s3>;
+		cam_vdig-supply = <&camera_eyetracking_force>;
 		cam_clk-supply = <&titan_top_gdsc>;
 		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
 					"cam_clk";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
index 0de0331..8c746e5 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
@@ -116,7 +116,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -140,7 +140,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -164,7 +164,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -188,7 +188,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -212,7 +212,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -235,7 +235,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -259,7 +259,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy1>;
 		clocks = <&mdss_dsi1_pll BYTECLK_MUX_1_CLK>,
 			<&mdss_dsi1_pll PCLK_MUX_1_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -283,7 +283,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy1>;
 		clocks = <&mdss_dsi1_pll BYTECLK_MUX_1_CLK>,
 			<&mdss_dsi1_pll PCLK_MUX_1_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -307,7 +307,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -325,7 +325,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -343,7 +343,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -361,7 +361,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -379,7 +379,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -397,7 +397,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -415,7 +415,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 		       <&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -439,7 +439,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 		       <&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -463,7 +463,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -487,7 +487,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 				<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -561,11 +561,10 @@
 &dsi_dual_nt35597_truly_video {
 	qcom,mdss-dsi-t-clk-post = <0x0D>;
 	qcom,mdss-dsi-t-clk-pre = <0x2D>;
-	qcom,mdss-dsi-min-refresh-rate = <53>;
-	qcom,mdss-dsi-max-refresh-rate = <60>;
 	qcom,mdss-dsi-pan-enable-dynamic-fps;
 	qcom,mdss-dsi-pan-fps-update =
 		"dfps_immediate_porch_mode_vfp";
+	qcom,dsi-supported-dfps-list = <53 55 60>;
 	qcom,esd-check-enabled;
 	qcom,mdss-dsi-panel-status-check-mode = "reg_read";
 	qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
@@ -634,11 +633,10 @@
 &dsi_nt35597_truly_dsc_video {
 	qcom,mdss-dsi-t-clk-post = <0x0b>;
 	qcom,mdss-dsi-t-clk-pre = <0x23>;
-	qcom,mdss-dsi-min-refresh-rate = <53>;
-	qcom,mdss-dsi-max-refresh-rate = <60>;
 	qcom,mdss-dsi-pan-enable-dynamic-fps;
 	qcom,mdss-dsi-pan-fps-update =
 		"dfps_immediate_porch_mode_vfp";
+	qcom,dsi-supported-dfps-list = <53 55 60>;
 	qcom,esd-check-enabled;
 	qcom,mdss-dsi-panel-status-check-mode = "reg_read";
 	qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
diff --git a/arch/arm64/configs/msm8937-perf_defconfig b/arch/arm64/configs/msm8937-perf_defconfig
index 65b5dd6..c71f860 100644
--- a/arch/arm64/configs/msm8937-perf_defconfig
+++ b/arch/arm64/configs/msm8937-perf_defconfig
@@ -481,6 +481,7 @@
 CONFIG_USB_CONFIGFS=y
 CONFIG_USB_CONFIGFS_SERIAL=y
 CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_QCRNDIS=y
 CONFIG_USB_CONFIGFS_RNDIS=y
 CONFIG_USB_CONFIGFS_RMNET_BAM=y
 CONFIG_USB_CONFIGFS_MASS_STORAGE=y
diff --git a/arch/arm64/configs/msm8937_defconfig b/arch/arm64/configs/msm8937_defconfig
index 4b416db7..5b6f4a5 100644
--- a/arch/arm64/configs/msm8937_defconfig
+++ b/arch/arm64/configs/msm8937_defconfig
@@ -491,6 +491,7 @@
 CONFIG_USB_CONFIGFS=y
 CONFIG_USB_CONFIGFS_SERIAL=y
 CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_QCRNDIS=y
 CONFIG_USB_CONFIGFS_RNDIS=y
 CONFIG_USB_CONFIGFS_RMNET_BAM=y
 CONFIG_USB_CONFIGFS_MASS_STORAGE=y
diff --git a/arch/arm64/include/asm/jump_label.h b/arch/arm64/include/asm/jump_label.h
index 1b5e0e8..7e2b3e3 100644
--- a/arch/arm64/include/asm/jump_label.h
+++ b/arch/arm64/include/asm/jump_label.h
@@ -28,7 +28,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
-	asm goto("1: nop\n\t"
+	asm_volatile_goto("1: nop\n\t"
 		 ".pushsection __jump_table,  \"aw\"\n\t"
 		 ".align 3\n\t"
 		 ".quad 1b, %l[l_yes], %c0\n\t"
@@ -42,7 +42,7 @@
 
 static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
 {
-	asm goto("1: b %l[l_yes]\n\t"
+	asm_volatile_goto("1: b %l[l_yes]\n\t"
 		 ".pushsection __jump_table,  \"aw\"\n\t"
 		 ".align 3\n\t"
 		 ".quad 1b, %l[l_yes], %c0\n\t"
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index fe39e68..ba0d52c 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -42,6 +42,11 @@
 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
 
+static inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
+{
+	return !(vcpu->arch.hcr_el2 & HCR_RW);
+}
+
 static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index d3e0a2f..e41a7b4 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -57,6 +57,45 @@
 	return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
 }
 
+static int validate_core_offset(const struct kvm_one_reg *reg)
+{
+	u64 off = core_reg_offset_from_id(reg->id);
+	int size;
+
+	switch (off) {
+	case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
+	     KVM_REG_ARM_CORE_REG(regs.regs[30]):
+	case KVM_REG_ARM_CORE_REG(regs.sp):
+	case KVM_REG_ARM_CORE_REG(regs.pc):
+	case KVM_REG_ARM_CORE_REG(regs.pstate):
+	case KVM_REG_ARM_CORE_REG(sp_el1):
+	case KVM_REG_ARM_CORE_REG(elr_el1):
+	case KVM_REG_ARM_CORE_REG(spsr[0]) ...
+	     KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
+		size = sizeof(__u64);
+		break;
+
+	case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
+	     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
+		size = sizeof(__uint128_t);
+		break;
+
+	case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
+	case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
+		size = sizeof(__u32);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	if (KVM_REG_SIZE(reg->id) == size &&
+	    IS_ALIGNED(off, size / sizeof(__u32)))
+		return 0;
+
+	return -EINVAL;
+}
+
 static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 {
 	/*
@@ -76,6 +115,9 @@
 	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
 		return -ENOENT;
 
+	if (validate_core_offset(reg))
+		return -EINVAL;
+
 	if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
 		return -EFAULT;
 
@@ -98,6 +140,9 @@
 	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
 		return -ENOENT;
 
+	if (validate_core_offset(reg))
+		return -EINVAL;
+
 	if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
 		return -EINVAL;
 
@@ -107,17 +152,25 @@
 	}
 
 	if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
-		u32 mode = (*(u32 *)valp) & COMPAT_PSR_MODE_MASK;
+		u64 mode = (*(u64 *)valp) & COMPAT_PSR_MODE_MASK;
 		switch (mode) {
 		case COMPAT_PSR_MODE_USR:
+			if (!system_supports_32bit_el0())
+				return -EINVAL;
+			break;
 		case COMPAT_PSR_MODE_FIQ:
 		case COMPAT_PSR_MODE_IRQ:
 		case COMPAT_PSR_MODE_SVC:
 		case COMPAT_PSR_MODE_ABT:
 		case COMPAT_PSR_MODE_UND:
+			if (!vcpu_el1_is_32bit(vcpu))
+				return -EINVAL;
+			break;
 		case PSR_MODE_EL0t:
 		case PSR_MODE_EL1t:
 		case PSR_MODE_EL1h:
+			if (vcpu_el1_is_32bit(vcpu))
+				return -EINVAL;
 			break;
 		default:
 			err = -EINVAL;
diff --git a/arch/hexagon/include/asm/bitops.h b/arch/hexagon/include/asm/bitops.h
index 5e4a59b..2691a18 100644
--- a/arch/hexagon/include/asm/bitops.h
+++ b/arch/hexagon/include/asm/bitops.h
@@ -211,7 +211,7 @@
  * This is defined the same way as ffs.
  * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
  */
-static inline long fls(int x)
+static inline int fls(int x)
 {
 	int r;
 
@@ -232,7 +232,7 @@
  * the libc and compiler builtin ffs routines, therefore
  * differs in spirit from the above ffz (man ffs).
  */
-static inline long ffs(int x)
+static inline int ffs(int x)
 {
 	int r;
 
diff --git a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c
index b901778..0e2be48 100644
--- a/arch/hexagon/kernel/dma.c
+++ b/arch/hexagon/kernel/dma.c
@@ -68,7 +68,7 @@
 			panic("Can't create %s() memory pool!", __func__);
 		else
 			gen_pool_add(coherent_pool,
-				pfn_to_virt(max_low_pfn),
+				(unsigned long)pfn_to_virt(max_low_pfn),
 				hexagon_coherent_pool_size, -1);
 	}
 
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index e3acf5c..0292504 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -365,9 +365,9 @@
 }
 early_param("fadump_reserve_mem", early_fadump_reserve_mem);
 
-static void register_fw_dump(struct fadump_mem_struct *fdm)
+static int register_fw_dump(struct fadump_mem_struct *fdm)
 {
-	int rc;
+	int rc, err;
 	unsigned int wait_time;
 
 	pr_debug("Registering for firmware-assisted kernel dump...\n");
@@ -384,7 +384,11 @@
 
 	} while (wait_time);
 
+	err = -EIO;
 	switch (rc) {
+	default:
+		pr_err("Failed to register. Unknown Error(%d).\n", rc);
+		break;
 	case -1:
 		printk(KERN_ERR "Failed to register firmware-assisted kernel"
 			" dump. Hardware Error(%d).\n", rc);
@@ -392,18 +396,22 @@
 	case -3:
 		printk(KERN_ERR "Failed to register firmware-assisted kernel"
 			" dump. Parameter Error(%d).\n", rc);
+		err = -EINVAL;
 		break;
 	case -9:
 		printk(KERN_ERR "firmware-assisted kernel dump is already "
 			" registered.");
 		fw_dump.dump_registered = 1;
+		err = -EEXIST;
 		break;
 	case 0:
 		printk(KERN_INFO "firmware-assisted kernel dump registration"
 			" is successful\n");
 		fw_dump.dump_registered = 1;
+		err = 0;
 		break;
 	}
+	return err;
 }
 
 void crash_fadump(struct pt_regs *regs, const char *str)
@@ -1006,7 +1014,7 @@
 	return addr;
 }
 
-static void register_fadump(void)
+static int register_fadump(void)
 {
 	unsigned long addr;
 	void *vaddr;
@@ -1017,7 +1025,7 @@
 	 * assisted dump.
 	 */
 	if (!fw_dump.reserve_dump_area_size)
-		return;
+		return -ENODEV;
 
 	ret = fadump_setup_crash_memory_ranges();
 	if (ret)
@@ -1032,7 +1040,7 @@
 	fadump_create_elfcore_headers(vaddr);
 
 	/* register the future kernel dump with firmware. */
-	register_fw_dump(&fdm);
+	return register_fw_dump(&fdm);
 }
 
 static int fadump_unregister_dump(struct fadump_mem_struct *fdm)
@@ -1218,7 +1226,6 @@
 	switch (buf[0]) {
 	case '0':
 		if (fw_dump.dump_registered == 0) {
-			ret = -EINVAL;
 			goto unlock_out;
 		}
 		/* Un-register Firmware-assisted dump */
@@ -1226,11 +1233,11 @@
 		break;
 	case '1':
 		if (fw_dump.dump_registered == 1) {
-			ret = -EINVAL;
+			ret = -EEXIST;
 			goto unlock_out;
 		}
 		/* Register Firmware-assisted dump */
-		register_fadump();
+		ret = register_fadump();
 		break;
 	default:
 		ret = -EINVAL;
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index 2694d07..9dafd7a 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -186,7 +186,12 @@
 			(unsigned long)(crashk_res.start >> 20),
 			(unsigned long)(memblock_phys_mem_size() >> 20));
 
-	memblock_reserve(crashk_res.start, crash_size);
+	if (!memblock_is_region_memory(crashk_res.start, crash_size) ||
+	    memblock_reserve(crashk_res.start, crash_size)) {
+		pr_err("Failed to reserve memory for crashkernel!\n");
+		crashk_res.start = crashk_res.end = 0;
+		return;
+	}
 }
 
 int overlaps_crashkernel(unsigned long start, unsigned long size)
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 05f09ae..915e89f 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -314,7 +314,7 @@
 	unsigned long pp, key;
 	unsigned long v, gr;
 	__be64 *hptep;
-	int index;
+	long int index;
 	int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);
 
 	/* Get SLB entry */
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index f52cc6f..8015e40 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -2623,7 +2623,7 @@
 	level_shift = entries_shift + 3;
 	level_shift = max_t(unsigned, level_shift, PAGE_SHIFT);
 
-	if ((level_shift - 3) * levels + page_shift >= 60)
+	if ((level_shift - 3) * levels + page_shift >= 55)
 		return -EINVAL;
 
 	/* Allocate TCE table */
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 02042b6..e6665a6 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -79,7 +79,7 @@
 struct dcss_segment {
 	struct list_head list;
 	char dcss_name[8];
-	char res_name[15];
+	char res_name[16];
 	unsigned long start_addr;
 	unsigned long end;
 	atomic_t ref_count;
@@ -432,7 +432,7 @@
 	memcpy(&seg->res_name, seg->dcss_name, 8);
 	EBCASC(seg->res_name, 8);
 	seg->res_name[8] = '\0';
-	strncat(seg->res_name, " (DCSS)", 7);
+	strlcat(seg->res_name, " (DCSS)", sizeof(seg->res_name));
 	seg->res->name = seg->res_name;
 	rc = seg->vm_segtype;
 	if (rc == SEG_TYPE_SC ||
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index 995f785..781a044 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -26,7 +26,7 @@
 		.data		= &page_table_allocate_pgste,
 		.maxlen		= sizeof(int),
 		.mode		= S_IRUGO | S_IWUSR,
-		.proc_handler	= proc_dointvec,
+		.proc_handler	= proc_dointvec_minmax,
 		.extra1		= &page_table_allocate_pgste_min,
 		.extra2		= &page_table_allocate_pgste_max,
 	},
diff --git a/arch/x86/configs/x86_64_cuttlefish_defconfig b/arch/x86/configs/x86_64_cuttlefish_defconfig
index 98543be..f2a764f 100644
--- a/arch/x86/configs/x86_64_cuttlefish_defconfig
+++ b/arch/x86/configs/x86_64_cuttlefish_defconfig
@@ -210,7 +210,6 @@
 CONFIG_BLK_DEV_RAM_SIZE=8192
 CONFIG_VIRTIO_BLK=y
 CONFIG_UID_SYS_STATS=y
-CONFIG_MEMORY_STATE_TIME=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_BLK_DEV_SR=y
diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c
index dd19584..5773e11 100644
--- a/arch/x86/crypto/crc32c-intel_glue.c
+++ b/arch/x86/crypto/crc32c-intel_glue.c
@@ -48,21 +48,13 @@
 #ifdef CONFIG_X86_64
 /*
  * use carryless multiply version of crc32c when buffer
- * size is >= 512 (when eager fpu is enabled) or
- * >= 1024 (when eager fpu is disabled) to account
+ * size is >= 512 to account
  * for fpu state save/restore overhead.
  */
-#define CRC32C_PCL_BREAKEVEN_EAGERFPU	512
-#define CRC32C_PCL_BREAKEVEN_NOEAGERFPU	1024
+#define CRC32C_PCL_BREAKEVEN	512
 
 asmlinkage unsigned int crc_pcl(const u8 *buffer, int len,
 				unsigned int crc_init);
-static int crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_EAGERFPU;
-#define set_pcl_breakeven_point()					\
-do {									\
-	if (!use_eager_fpu())						\
-		crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_NOEAGERFPU;	\
-} while (0)
 #endif /* CONFIG_X86_64 */
 
 static u32 crc32c_intel_le_hw_byte(u32 crc, unsigned char const *data, size_t length)
@@ -185,7 +177,7 @@
 	 * use faster PCL version if datasize is large enough to
 	 * overcome kernel fpu state save/restore overhead
 	 */
-	if (len >= crc32c_pcl_breakeven && irq_fpu_usable()) {
+	if (len >= CRC32C_PCL_BREAKEVEN && irq_fpu_usable()) {
 		kernel_fpu_begin();
 		*crcp = crc_pcl(data, len, *crcp);
 		kernel_fpu_end();
@@ -197,7 +189,7 @@
 static int __crc32c_pcl_intel_finup(u32 *crcp, const u8 *data, unsigned int len,
 				u8 *out)
 {
-	if (len >= crc32c_pcl_breakeven && irq_fpu_usable()) {
+	if (len >= CRC32C_PCL_BREAKEVEN && irq_fpu_usable()) {
 		kernel_fpu_begin();
 		*(__le32 *)out = ~cpu_to_le32(crc_pcl(data, len, *crcp));
 		kernel_fpu_end();
@@ -257,7 +249,6 @@
 		alg.update = crc32c_pcl_intel_update;
 		alg.finup = crc32c_pcl_intel_finup;
 		alg.digest = crc32c_pcl_intel_digest;
-		set_pcl_breakeven_point();
 	}
 #endif
 	return crypto_register_shash(&alg);
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 28f3691..d764992 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -91,7 +91,7 @@
 .endm
 
 .macro TRACE_IRQS_IRETQ_DEBUG
-	bt	$9, EFLAGS(%rsp)		/* interrupts off? */
+	btl	$9, EFLAGS(%rsp)		/* interrupts off? */
 	jnc	1f
 	TRACE_IRQS_ON_DEBUG
 1:
@@ -485,7 +485,7 @@
 #ifdef CONFIG_PREEMPT
 	/* Interrupts are off */
 	/* Check if we need preemption */
-	bt	$9, EFLAGS(%rsp)		/* were interrupts off? */
+	btl	$9, EFLAGS(%rsp)		/* were interrupts off? */
 	jnc	1f
 0:	cmpl	$0, PER_CPU_VAR(__preempt_count)
 	jnz	1f
diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
index 02223cb..1e96709 100644
--- a/arch/x86/entry/vdso/vclock_gettime.c
+++ b/arch/x86/entry/vdso/vclock_gettime.c
@@ -37,8 +37,9 @@
 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
 {
 	long ret;
-	asm("syscall" : "=a" (ret) :
-	    "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
+	asm ("syscall" : "=a" (ret), "=m" (*ts) :
+	     "0" (__NR_clock_gettime), "D" (clock), "S" (ts) :
+	     "memory", "rcx", "r11");
 	return ret;
 }
 
@@ -46,8 +47,9 @@
 {
 	long ret;
 
-	asm("syscall" : "=a" (ret) :
-	    "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
+	asm ("syscall" : "=a" (ret), "=m" (*tv), "=m" (*tz) :
+	     "0" (__NR_gettimeofday), "D" (tv), "S" (tz) :
+	     "memory", "rcx", "r11");
 	return ret;
 }
 
@@ -58,13 +60,13 @@
 {
 	long ret;
 
-	asm(
+	asm (
 		"mov %%ebx, %%edx \n"
-		"mov %2, %%ebx \n"
+		"mov %[clock], %%ebx \n"
 		"call __kernel_vsyscall \n"
 		"mov %%edx, %%ebx \n"
-		: "=a" (ret)
-		: "0" (__NR_clock_gettime), "g" (clock), "c" (ts)
+		: "=a" (ret), "=m" (*ts)
+		: "0" (__NR_clock_gettime), [clock] "g" (clock), "c" (ts)
 		: "memory", "edx");
 	return ret;
 }
@@ -73,13 +75,13 @@
 {
 	long ret;
 
-	asm(
+	asm (
 		"mov %%ebx, %%edx \n"
-		"mov %2, %%ebx \n"
+		"mov %[tv], %%ebx \n"
 		"call __kernel_vsyscall \n"
 		"mov %%edx, %%ebx \n"
-		: "=a" (ret)
-		: "0" (__NR_gettimeofday), "g" (tv), "c" (tz)
+		: "=a" (ret), "=m" (*tv), "=m" (*tz)
+		: "0" (__NR_gettimeofday), [tv] "g" (tv), "c" (tz)
 		: "memory", "edx");
 	return ret;
 }
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index 5d103a8..10c1a5c 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -342,7 +342,7 @@
 
 	mask = x86_pmu.lbr_nr - 1;
 	tos = task_ctx->tos;
-	for (i = 0; i < tos; i++) {
+	for (i = 0; i < task_ctx->valid_lbrs; i++) {
 		lbr_idx = (tos - i) & mask;
 		wrlbr_from(lbr_idx, task_ctx->lbr_from[i]);
 		wrlbr_to  (lbr_idx, task_ctx->lbr_to[i]);
@@ -350,6 +350,15 @@
 		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
 			wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
 	}
+
+	for (; i < x86_pmu.lbr_nr; i++) {
+		lbr_idx = (tos - i) & mask;
+		wrlbr_from(lbr_idx, 0);
+		wrlbr_to(lbr_idx, 0);
+		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
+			wrmsrl(MSR_LBR_INFO_0 + lbr_idx, 0);
+	}
+
 	wrmsrl(x86_pmu.lbr_tos, tos);
 	task_ctx->lbr_stack_state = LBR_NONE;
 }
@@ -357,7 +366,7 @@
 static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
 {
 	unsigned lbr_idx, mask;
-	u64 tos;
+	u64 tos, from;
 	int i;
 
 	if (task_ctx->lbr_callstack_users == 0) {
@@ -367,13 +376,17 @@
 
 	mask = x86_pmu.lbr_nr - 1;
 	tos = intel_pmu_lbr_tos();
-	for (i = 0; i < tos; i++) {
+	for (i = 0; i < x86_pmu.lbr_nr; i++) {
 		lbr_idx = (tos - i) & mask;
-		task_ctx->lbr_from[i] = rdlbr_from(lbr_idx);
+		from = rdlbr_from(lbr_idx);
+		if (!from)
+			break;
+		task_ctx->lbr_from[i] = from;
 		task_ctx->lbr_to[i]   = rdlbr_to(lbr_idx);
 		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
 			rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
 	}
+	task_ctx->valid_lbrs = i;
 	task_ctx->tos = tos;
 	task_ctx->lbr_stack_state = LBR_VALID;
 }
@@ -522,7 +535,7 @@
  */
 static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
 {
-	bool need_info = false;
+	bool need_info = false, call_stack = false;
 	unsigned long mask = x86_pmu.lbr_nr - 1;
 	int lbr_format = x86_pmu.intel_cap.lbr_format;
 	u64 tos = intel_pmu_lbr_tos();
@@ -533,7 +546,7 @@
 	if (cpuc->lbr_sel) {
 		need_info = !(cpuc->lbr_sel->config & LBR_NO_INFO);
 		if (cpuc->lbr_sel->config & LBR_CALL_STACK)
-			num = tos;
+			call_stack = true;
 	}
 
 	for (i = 0; i < num; i++) {
@@ -546,6 +559,13 @@
 		from = rdlbr_from(lbr_idx);
 		to   = rdlbr_to(lbr_idx);
 
+		/*
+		 * Read LBR call stack entries
+		 * until invalid entry (0s) is detected.
+		 */
+		if (call_stack && !from)
+			break;
+
 		if (lbr_format == LBR_FORMAT_INFO && need_info) {
 			u64 info;
 
@@ -1175,4 +1195,8 @@
 
 	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
 	x86_pmu.lbr_sel_map  = snb_lbr_sel_map;
+
+	/* Knights Landing does have MISPREDICT bit */
+	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_LIP)
+		x86_pmu.intel_cap.lbr_format = LBR_FORMAT_EIP_FLAGS;
 }
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index f356317..1bfebbc 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -633,6 +633,7 @@
 	u64 lbr_to[MAX_LBR_ENTRIES];
 	u64 lbr_info[MAX_LBR_ENTRIES];
 	int tos;
+	int valid_lbrs;
 	int lbr_callstack_users;
 	int lbr_stack_state;
 };
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index fbc1474..f6d1bc9 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -104,7 +104,6 @@
 #define X86_FEATURE_EXTD_APICID	( 3*32+26) /* has extended APICID (8 bits) */
 #define X86_FEATURE_AMD_DCM     ( 3*32+27) /* multi-node processor */
 #define X86_FEATURE_APERFMPERF	( 3*32+28) /* APERFMPERF */
-/* free, was #define X86_FEATURE_EAGER_FPU	( 3*32+29) * "eagerfpu" Non lazy FPU restore */
 #define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 8554f96..2515284 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -14,6 +14,16 @@
 #ifndef _ASM_X86_FIXMAP_H
 #define _ASM_X86_FIXMAP_H
 
+/*
+ * Exposed to assembly code for setting up initial page tables. Cannot be
+ * calculated in assembly code (fixmap entries are an enum), but is sanity
+ * checked in the actual fixmap C code to make sure that the fixmap is
+ * covered fully.
+ */
+#define FIXMAP_PMD_NUM	2
+/* fixmap starts downwards from the 507th entry in level2_fixmap_pgt */
+#define FIXMAP_PMD_TOP	507
+
 #ifndef __ASSEMBLY__
 #include <linux/kernel.h>
 #include <asm/acpi.h>
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 8852e3a..499d6ed 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -60,11 +60,6 @@
 /*
  * FPU related CPU feature flag helper routines:
  */
-static __always_inline __pure bool use_eager_fpu(void)
-{
-	return true;
-}
-
 static __always_inline __pure bool use_xsaveopt(void)
 {
 	return static_cpu_has(X86_FEATURE_XSAVEOPT);
@@ -501,24 +496,6 @@
 }
 
 
-/*
- * Wrap lazy FPU TS handling in a 'hw fpregs activation/deactivation'
- * idiom, which is then paired with the sw-flag (fpregs_active) later on:
- */
-
-static inline void __fpregs_activate_hw(void)
-{
-	if (!use_eager_fpu())
-		clts();
-}
-
-static inline void __fpregs_deactivate_hw(void)
-{
-	if (!use_eager_fpu())
-		stts();
-}
-
-/* Must be paired with an 'stts' (fpregs_deactivate_hw()) after! */
 static inline void __fpregs_deactivate(struct fpu *fpu)
 {
 	WARN_ON_FPU(!fpu->fpregs_active);
@@ -528,7 +505,6 @@
 	trace_x86_fpu_regs_deactivated(fpu);
 }
 
-/* Must be paired with a 'clts' (fpregs_activate_hw()) before! */
 static inline void __fpregs_activate(struct fpu *fpu)
 {
 	WARN_ON_FPU(fpu->fpregs_active);
@@ -554,22 +530,17 @@
 }
 
 /*
- * Encapsulate the CR0.TS handling together with the
- * software flag.
- *
  * These generally need preemption protection to work,
  * do try to avoid using these on their own.
  */
 static inline void fpregs_activate(struct fpu *fpu)
 {
-	__fpregs_activate_hw();
 	__fpregs_activate(fpu);
 }
 
 static inline void fpregs_deactivate(struct fpu *fpu)
 {
 	__fpregs_deactivate(fpu);
-	__fpregs_deactivate_hw();
 }
 
 /*
@@ -596,8 +567,7 @@
 	 * or if the past 5 consecutive context-switches used math.
 	 */
 	fpu.preload = static_cpu_has(X86_FEATURE_FPU) &&
-		      new_fpu->fpstate_active &&
-		      (use_eager_fpu() || new_fpu->counter > 5);
+		      new_fpu->fpstate_active;
 
 	if (old_fpu->fpregs_active) {
 		if (!copy_fpregs_to_fpstate(old_fpu))
@@ -611,18 +581,13 @@
 
 		/* Don't change CR0.TS if we just switch! */
 		if (fpu.preload) {
-			new_fpu->counter++;
 			__fpregs_activate(new_fpu);
 			trace_x86_fpu_regs_activated(new_fpu);
 			prefetch(&new_fpu->state);
-		} else {
-			__fpregs_deactivate_hw();
 		}
 	} else {
-		old_fpu->counter = 0;
 		old_fpu->last_cpu = -1;
 		if (fpu.preload) {
-			new_fpu->counter++;
 			if (fpu_want_lazy_restore(new_fpu, cpu))
 				fpu.preload = 0;
 			else
diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h
index 48df486..3c80f5b 100644
--- a/arch/x86/include/asm/fpu/types.h
+++ b/arch/x86/include/asm/fpu/types.h
@@ -322,17 +322,6 @@
 	unsigned char			fpregs_active;
 
 	/*
-	 * @counter:
-	 *
-	 * This counter contains the number of consecutive context switches
-	 * during which the FPU stays used. If this is over a threshold, the
-	 * lazy FPU restore logic becomes eager, to save the trap overhead.
-	 * This is an unsigned char so that after 256 iterations the counter
-	 * wraps and the context switch behavior turns lazy again; this is to
-	 * deal with bursty apps that only use the FPU for a short time:
-	 */
-	unsigned char			counter;
-	/*
 	 * @state:
 	 *
 	 * In-memory copy of all FPU registers that we save/restore
@@ -340,29 +329,6 @@
 	 * the registers in the FPU are more recent than this state
 	 * copy. If the task context-switches away then they get
 	 * saved here and represent the FPU state.
-	 *
-	 * After context switches there may be a (short) time period
-	 * during which the in-FPU hardware registers are unchanged
-	 * and still perfectly match this state, if the tasks
-	 * scheduled afterwards are not using the FPU.
-	 *
-	 * This is the 'lazy restore' window of optimization, which
-	 * we track though 'fpu_fpregs_owner_ctx' and 'fpu->last_cpu'.
-	 *
-	 * We detect whether a subsequent task uses the FPU via setting
-	 * CR0::TS to 1, which causes any FPU use to raise a #NM fault.
-	 *
-	 * During this window, if the task gets scheduled again, we
-	 * might be able to skip having to do a restore from this
-	 * memory buffer to the hardware registers - at the cost of
-	 * incurring the overhead of #NM fault traps.
-	 *
-	 * Note that on modern CPUs that support the XSAVEOPT (or other
-	 * optimized XSAVE instructions), we don't use #NM traps anymore,
-	 * as the hardware can track whether FPU registers need saving
-	 * or not. On such CPUs we activate the non-lazy ('eagerfpu')
-	 * logic, which unconditionally saves/restores all FPU state
-	 * across context switches. (if FPU state exists.)
 	 */
 	union fpregs_state		state;
 	/*
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 221a32e..d5c4df9 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -13,13 +13,14 @@
 #include <asm/processor.h>
 #include <linux/bitops.h>
 #include <linux/threads.h>
+#include <asm/fixmap.h>
 
 extern pud_t level3_kernel_pgt[512];
 extern pud_t level3_ident_pgt[512];
 extern pmd_t level2_kernel_pgt[512];
 extern pmd_t level2_fixmap_pgt[512];
 extern pmd_t level2_ident_pgt[512];
-extern pte_t level1_fixmap_pgt[512];
+extern pte_t level1_fixmap_pgt[512 * FIXMAP_PMD_NUM];
 extern pgd_t init_level4_pgt[];
 
 #define swapper_pg_dir init_level4_pgt
diff --git a/arch/x86/include/asm/trace/fpu.h b/arch/x86/include/asm/trace/fpu.h
index 9217ab1..342e597 100644
--- a/arch/x86/include/asm/trace/fpu.h
+++ b/arch/x86/include/asm/trace/fpu.h
@@ -14,7 +14,6 @@
 		__field(struct fpu *, fpu)
 		__field(bool, fpregs_active)
 		__field(bool, fpstate_active)
-		__field(int, counter)
 		__field(u64, xfeatures)
 		__field(u64, xcomp_bv)
 		),
@@ -23,17 +22,15 @@
 		__entry->fpu		= fpu;
 		__entry->fpregs_active	= fpu->fpregs_active;
 		__entry->fpstate_active	= fpu->fpstate_active;
-		__entry->counter	= fpu->counter;
 		if (boot_cpu_has(X86_FEATURE_OSXSAVE)) {
 			__entry->xfeatures = fpu->state.xsave.header.xfeatures;
 			__entry->xcomp_bv  = fpu->state.xsave.header.xcomp_bv;
 		}
 	),
-	TP_printk("x86/fpu: %p fpregs_active: %d fpstate_active: %d counter: %d xfeatures: %llx xcomp_bv: %llx",
+	TP_printk("x86/fpu: %p fpregs_active: %d fpstate_active: %d xfeatures: %llx xcomp_bv: %llx",
 			__entry->fpu,
 			__entry->fpregs_active,
 			__entry->fpstate_active,
-			__entry->counter,
 			__entry->xfeatures,
 			__entry->xcomp_bv
 	)
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index 739c0c5..1bb90fa 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -356,5 +356,6 @@
 
 #define KVM_X86_QUIRK_LINT0_REENABLED	(1 << 0)
 #define KVM_X86_QUIRK_CD_NW_CLEARED	(1 << 1)
+#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE	(1 << 2)
 
 #endif /* _ASM_X86_KVM_H */
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 430c095..fc96511 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -59,27 +59,9 @@
 	return this_cpu_read(in_kernel_fpu);
 }
 
-/*
- * Were we in an interrupt that interrupted kernel mode?
- *
- * On others, we can do a kernel_fpu_begin/end() pair *ONLY* if that
- * pair does nothing at all: the thread must not have fpu (so
- * that we don't try to save the FPU state), and TS must
- * be set (so that the clts/stts pair does nothing that is
- * visible in the interrupted kernel thread).
- *
- * Except for the eagerfpu case when we return true; in the likely case
- * the thread has FPU but we are not going to set/clear TS.
- */
 static bool interrupted_kernel_fpu_idle(void)
 {
-	if (kernel_fpu_disabled())
-		return false;
-
-	if (use_eager_fpu())
-		return true;
-
-	return !current->thread.fpu.fpregs_active && (read_cr0() & X86_CR0_TS);
+	return !kernel_fpu_disabled();
 }
 
 /*
@@ -127,7 +109,6 @@
 		copy_fpregs_to_fpstate(fpu);
 	} else {
 		this_cpu_write(fpu_fpregs_owner_ctx, NULL);
-		__fpregs_activate_hw();
 	}
 }
 EXPORT_SYMBOL(__kernel_fpu_begin);
@@ -138,8 +119,6 @@
 
 	if (fpu->fpregs_active)
 		copy_kernel_to_fpregs(&fpu->state);
-	else
-		__fpregs_deactivate_hw();
 
 	kernel_fpu_enable();
 }
@@ -201,10 +180,7 @@
 	trace_x86_fpu_before_save(fpu);
 	if (fpu->fpregs_active) {
 		if (!copy_fpregs_to_fpstate(fpu)) {
-			if (use_eager_fpu())
-				copy_kernel_to_fpregs(&fpu->state);
-			else
-				fpregs_deactivate(fpu);
+			copy_kernel_to_fpregs(&fpu->state);
 		}
 	}
 	trace_x86_fpu_after_save(fpu);
@@ -249,7 +225,6 @@
 
 int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
 {
-	dst_fpu->counter = 0;
 	dst_fpu->fpregs_active = 0;
 	dst_fpu->last_cpu = -1;
 
@@ -262,8 +237,7 @@
 	 * Don't let 'init optimized' areas of the XSAVE area
 	 * leak into the child task:
 	 */
-	if (use_eager_fpu())
-		memset(&dst_fpu->state.xsave, 0, fpu_kernel_xstate_size);
+	memset(&dst_fpu->state.xsave, 0, fpu_kernel_xstate_size);
 
 	/*
 	 * Save current FPU registers directly into the child
@@ -285,10 +259,7 @@
 		memcpy(&src_fpu->state, &dst_fpu->state,
 		       fpu_kernel_xstate_size);
 
-		if (use_eager_fpu())
-			copy_kernel_to_fpregs(&src_fpu->state);
-		else
-			fpregs_deactivate(src_fpu);
+		copy_kernel_to_fpregs(&src_fpu->state);
 	}
 	preempt_enable();
 
@@ -461,7 +432,6 @@
 	trace_x86_fpu_before_restore(fpu);
 	fpregs_activate(fpu);
 	copy_kernel_to_fpregs(&fpu->state);
-	fpu->counter++;
 	trace_x86_fpu_after_restore(fpu);
 	kernel_fpu_enable();
 }
@@ -479,7 +449,6 @@
 void fpu__drop(struct fpu *fpu)
 {
 	preempt_disable();
-	fpu->counter = 0;
 
 	if (fpu->fpregs_active) {
 		/* Ignore delayed exceptions from user space */
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index 3ec0d2d..3a93186 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -344,11 +344,9 @@
 		}
 
 		fpu->fpstate_active = 1;
-		if (use_eager_fpu()) {
-			preempt_disable();
-			fpu__restore(fpu);
-			preempt_enable();
-		}
+		preempt_disable();
+		fpu__restore(fpu);
+		preempt_enable();
 
 		return err;
 	} else {
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index abfbb61b..e9d7f46 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -890,15 +890,6 @@
 	 */
 	if (!boot_cpu_has(X86_FEATURE_OSPKE))
 		return -EINVAL;
-	/*
-	 * For most XSAVE components, this would be an arduous task:
-	 * brining fpstate up to date with fpregs, updating fpstate,
-	 * then re-populating fpregs.  But, for components that are
-	 * never lazily managed, we can just access the fpregs
-	 * directly.  PKRU is never managed lazily, so we can just
-	 * manipulate it directly.  Make sure it stays that way.
-	 */
-	WARN_ON_ONCE(!use_eager_fpu());
 
 	/* Set the bits we need in PKRU:  */
 	if (init_val & PKEY_DISABLE_ACCESS)
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 9d72cf5..b0d6697 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -23,6 +23,7 @@
 #include "../entry/calling.h"
 #include <asm/export.h>
 #include <asm/nospec-branch.h>
+#include <asm/fixmap.h>
 
 #ifdef CONFIG_PARAVIRT
 #include <asm/asm-offsets.h>
@@ -493,13 +494,20 @@
 		KERNEL_IMAGE_SIZE/PMD_SIZE)
 
 NEXT_PAGE(level2_fixmap_pgt)
-	.fill	506,8,0
-	.quad	level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
-	/* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
-	.fill	5,8,0
+	.fill	(512 - 4 - FIXMAP_PMD_NUM),8,0
+	pgtno = 0
+	.rept (FIXMAP_PMD_NUM)
+	.quad level1_fixmap_pgt + (pgtno << PAGE_SHIFT) - __START_KERNEL_map \
+		+ _PAGE_TABLE;
+	pgtno = pgtno + 1
+	.endr
+	/* 6 MB reserved space + a 2MB hole */
+	.fill	4,8,0
 
 NEXT_PAGE(level1_fixmap_pgt)
+	.rept (FIXMAP_PMD_NUM)
 	.fill	512,8,0
+	.endr
 
 #undef PMDS
 
diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c
index 0fe720d..3f818ce 100644
--- a/arch/x86/kernel/tsc_msr.c
+++ b/arch/x86/kernel/tsc_msr.c
@@ -12,6 +12,7 @@
 #include <asm/setup.h>
 #include <asm/apic.h>
 #include <asm/param.h>
+#include <asm/tsc.h>
 
 #define MAX_NUM_FREQS	9
 
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 7e5119c..c17d389 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -16,7 +16,6 @@
 #include <linux/export.h>
 #include <linux/vmalloc.h>
 #include <linux/uaccess.h>
-#include <asm/fpu/internal.h> /* For use_eager_fpu.  Ugh! */
 #include <asm/user.h>
 #include <asm/fpu/xstate.h>
 #include "cpuid.h"
@@ -114,8 +113,7 @@
 	if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
 		best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
 
-	if (use_eager_fpu())
-		kvm_x86_ops->fpu_activate(vcpu);
+	kvm_x86_ops->fpu_activate(vcpu);
 
 	/*
 	 * The existing code assumes virtual address is 48-bit in the canonical
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index a8a86be..69a81a7 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1220,9 +1220,8 @@
 
 static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
 {
-	return kvm_apic_hw_enabled(apic) &&
-	    addr >= apic->base_address &&
-	    addr < apic->base_address + LAPIC_MMIO_LENGTH;
+	return addr >= apic->base_address &&
+		addr < apic->base_address + LAPIC_MMIO_LENGTH;
 }
 
 static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
@@ -1234,6 +1233,15 @@
 	if (!apic_mmio_in_range(apic, address))
 		return -EOPNOTSUPP;
 
+	if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
+		if (!kvm_check_has_quirk(vcpu->kvm,
+					 KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
+			return -EOPNOTSUPP;
+
+		memset(data, 0xff, len);
+		return 0;
+	}
+
 	kvm_lapic_reg_read(apic, offset, len, data);
 
 	return 0;
@@ -1646,6 +1654,14 @@
 	if (!apic_mmio_in_range(apic, address))
 		return -EOPNOTSUPP;
 
+	if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
+		if (!kvm_check_has_quirk(vcpu->kvm,
+					 KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
+			return -EOPNOTSUPP;
+
+		return 0;
+	}
+
 	/*
 	 * APIC register must be aligned on 128-bits boundary.
 	 * 32/64/128 bits registers must be accessed thru 32 bits.
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 203d423..5013ef1 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7631,16 +7631,6 @@
 	copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu);
 	__kernel_fpu_end();
 	++vcpu->stat.fpu_reload;
-	/*
-	 * If using eager FPU mode, or if the guest is a frequent user
-	 * of the FPU, just leave the FPU active for next time.
-	 * Every 255 times fpu_counter rolls over to 0; a guest that uses
-	 * the FPU in bursts will revert to loading it on demand.
-	 */
-	if (!use_eager_fpu()) {
-		if (++vcpu->fpu_counter < 5)
-			kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
-	}
 	trace_kvm_fpu(0);
 }
 
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c
index a8f90ce..dc6d990 100644
--- a/arch/x86/mm/numa_emulation.c
+++ b/arch/x86/mm/numa_emulation.c
@@ -60,7 +60,7 @@
 	eb->nid = nid;
 
 	if (emu_nid_to_phys[nid] == NUMA_NO_NODE)
-		emu_nid_to_phys[nid] = nid;
+		emu_nid_to_phys[nid] = pb->nid;
 
 	pb->start += size;
 	if (pb->start >= pb->end) {
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index e30baa8..8cbed30 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -536,6 +536,15 @@
 {
 	unsigned long address = __fix_to_virt(idx);
 
+#ifdef CONFIG_X86_64
+	/*
+	 * Ensure that the static initial page tables are covering the
+	 * fixmap completely.
+	 */
+	BUILD_BUG_ON(__end_of_permanent_fixed_addresses >
+		     (FIXMAP_PMD_NUM * PTRS_PER_PTE));
+#endif
+
 	if (idx >= __end_of_fixed_addresses) {
 		BUG();
 		return;
diff --git a/arch/x86/mm/pkeys.c b/arch/x86/mm/pkeys.c
index 0bbec04..e2d2b3c 100644
--- a/arch/x86/mm/pkeys.c
+++ b/arch/x86/mm/pkeys.c
@@ -142,8 +142,7 @@
  * Called from the FPU code when creating a fresh set of FPU
  * registers.  This is called from a very specific context where
  * we know the FPU regstiers are safe for use and we can use PKRU
- * directly.  The fact that PKRU is only available when we are
- * using eagerfpu mode makes this possible.
+ * directly.
  */
 void copy_init_pkru_to_fpregs(void)
 {
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index c92f75f..ebceaba 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1936,7 +1936,7 @@
 		 * L3_k[511] -> level2_fixmap_pgt */
 		convert_pfn_mfn(level3_kernel_pgt);
 
-		/* L3_k[511][506] -> level1_fixmap_pgt */
+		/* L3_k[511][508-FIXMAP_PMD_NUM ... 507] -> level1_fixmap_pgt */
 		convert_pfn_mfn(level2_fixmap_pgt);
 	}
 	/* We get [511][511] and have Xen's version of level2_kernel_pgt */
@@ -1970,7 +1970,11 @@
 		set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
 		set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
 		set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
-		set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
+
+		for (i = 0; i < FIXMAP_PMD_NUM; i++) {
+			set_page_prot(level1_fixmap_pgt + i * PTRS_PER_PTE,
+				      PAGE_KERNEL_RO);
+		}
 
 		/* Pin down new L4 */
 		pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
diff --git a/arch/x86/xen/pmu.c b/arch/x86/xen/pmu.c
index b9fc525..0b29a43 100644
--- a/arch/x86/xen/pmu.c
+++ b/arch/x86/xen/pmu.c
@@ -477,7 +477,7 @@
 irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id)
 {
 	int err, ret = IRQ_NONE;
-	struct pt_regs regs;
+	struct pt_regs regs = {0};
 	const struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
 	uint8_t xenpmu_flags = get_xenpmu_flags();
 
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index 860c9e5..3bc0e76 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -367,6 +367,7 @@
 	strncpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
 	strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<default>",
 		sizeof(rblkcipher.geniv));
+	rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';
 
 	rblkcipher.blocksize = alg->cra_blocksize;
 	rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
@@ -441,6 +442,7 @@
 	strncpy(rblkcipher.type, "givcipher", sizeof(rblkcipher.type));
 	strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<built-in>",
 		sizeof(rblkcipher.geniv));
+	rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';
 
 	rblkcipher.blocksize = alg->cra_blocksize;
 	rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 27f9866..59a0936 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -510,6 +510,7 @@
 	strncpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
 	strncpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "<default>",
 		sizeof(rblkcipher.geniv));
+	rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';
 
 	rblkcipher.blocksize = alg->cra_blocksize;
 	rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 9480d84..5960816 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -138,6 +138,8 @@
 
 source "drivers/xen/Kconfig"
 
+source "drivers/vservices/Kconfig"
+
 source "drivers/staging/Kconfig"
 
 source "drivers/platform/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 06e2bb4..557cba5 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -10,6 +10,8 @@
 
 obj-$(CONFIG_GENERIC_PHY)	+= phy/
 
+obj-$(CONFIG_VSERVICES_SUPPORT)	+= vservices/
+
 # GPIO must come after pinctrl as gpios may need to mux pins etc
 obj-$(CONFIG_PINCTRL)		+= pinctrl/
 obj-$(CONFIG_GPIOLIB)		+= gpio/
diff --git a/drivers/base/dma-removed.c b/drivers/base/dma-removed.c
index 09e77d5..4ecce1f 100644
--- a/drivers/base/dma-removed.c
+++ b/drivers/base/dma-removed.c
@@ -223,7 +223,7 @@
 	bool skip_zeroing = attrs & DMA_ATTR_SKIP_ZEROING;
 	int pageno;
 	unsigned long order;
-	void *addr = NULL;
+	void __iomem *addr = NULL;
 	struct removed_region *dma_mem = dev->removed_mem;
 	int nbits;
 	unsigned int align;
@@ -261,7 +261,7 @@
 			goto out;
 		}
 
-		addr = ioremap(base, size);
+		addr = ioremap_wc(base, size);
 		if (WARN_ON(!addr)) {
 			bitmap_clear(dma_mem->bitmap, pageno, nbits);
 		} else {
@@ -355,10 +355,10 @@
 {
 }
 
-void *removed_remap(struct device *dev, void *cpu_addr, dma_addr_t handle,
-			size_t size, unsigned long attrs)
+static void __iomem *removed_remap(struct device *dev, void *cpu_addr,
+			dma_addr_t handle, size_t size, unsigned long attrs)
 {
-	return ioremap(handle, size);
+	return ioremap_wc(handle, size);
 }
 
 void removed_unremap(struct device *dev, void *remapped_address, size_t size)
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index dc259d2..574d08f 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1362,8 +1362,10 @@
 
 	dpm_wait_for_children(dev, async);
 
-	if (async_error)
+	if (async_error) {
+		dev->power.direct_complete = false;
 		goto Complete;
+	}
 
 	/*
 	 * If a device configured to wake up the system from sleep states
@@ -1378,6 +1380,7 @@
 		pm_get_active_wakeup_sources(suspend_abort,
 			MAX_SUSPEND_ABORT_LEN);
 		log_suspend_abort_reason(suspend_abort);
+		dev->power.direct_complete = false;
 		async_error = -EBUSY;
 		goto Complete;
 	}
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 39dd30b..64d95c9 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -538,3 +538,23 @@
 	  module will be called rsxx.
 
 endif # BLK_DEV
+
+config VSERVICES_BLOCK_SERVER
+	tristate "Virtual Services block server"
+	depends on BLOCK && VSERVICES_SUPPORT && VSERVICES_SERVER
+	default y
+	select VSERVICES_PROTOCOL_BLOCK_SERVER
+	help
+	  Select this option if you want support for the server side of the
+	  Virtual Services block protocol. This allows any Linux block device
+	  to be virtualized and exported as a virtual service.
+
+config VSERVICES_BLOCK_CLIENT
+	tristate "Virtual Services Block client device"
+	depends on BLOCK && VSERVICES_SUPPORT && VSERVICES_CLIENT
+	default y
+	select VSERVICES_PROTOCOL_BLOCK_CLIENT
+	help
+	  Select this option if you want support for client side Virtual
+	  Services block devices. The virtual block devices are typically
+	  named /dev/vblock0, /dev/vblock1, etc.
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 1e9661e..fe9229f1 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -44,3 +44,8 @@
 
 skd-y		:= skd_main.o
 swim_mod-y	:= swim.o swim_asm.o
+
+obj-$(CONFIG_VSERVICES_BLOCK_SERVER)     += vs_block_server.o
+CFLAGS_vs_block_server.o += -Werror
+obj-$(CONFIG_VSERVICES_BLOCK_CLIENT)     += vs_block_client.o
+CFLAGS_vs_block_client.o += -Werror
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index e3d8e4c..a321d7d 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3459,6 +3459,9 @@
 					  (struct floppy_struct **)&outparam);
 		if (ret)
 			return ret;
+		memcpy(&inparam.g, outparam,
+				offsetof(struct floppy_struct, name));
+		outparam = &inparam.g;
 		break;
 	case FDMSGON:
 		UDP->flags |= FTD_MSG;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 9f840d9..a08c223 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1059,6 +1059,7 @@
 	memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
 	memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
 	memset(lo->lo_file_name, 0, LO_NAME_SIZE);
+	blk_queue_logical_block_size(lo->lo_queue, 512);
 	if (bdev) {
 		bdput(bdev);
 		invalidate_bdev(bdev);
@@ -1344,6 +1345,24 @@
 	return error;
 }
 
+static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
+{
+	if (lo->lo_state != Lo_bound)
+		return -ENXIO;
+
+	if (arg < 512 || arg > PAGE_SIZE || !is_power_of_2(arg))
+		return -EINVAL;
+
+	blk_mq_freeze_queue(lo->lo_queue);
+
+	blk_queue_logical_block_size(lo->lo_queue, arg);
+	loop_update_dio(lo);
+
+	blk_mq_unfreeze_queue(lo->lo_queue);
+
+	return 0;
+}
+
 static int lo_ioctl(struct block_device *bdev, fmode_t mode,
 	unsigned int cmd, unsigned long arg)
 {
@@ -1392,6 +1411,11 @@
 		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
 			err = loop_set_dio(lo, arg);
 		break;
+	case LOOP_SET_BLOCK_SIZE:
+		err = -EPERM;
+		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
+			err = loop_set_block_size(lo, arg);
+		break;
 	default:
 		err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
 	}
@@ -1546,6 +1570,7 @@
 		arg = (unsigned long) compat_ptr(arg);
 	case LOOP_SET_FD:
 	case LOOP_CHANGE_FD:
+	case LOOP_SET_BLOCK_SIZE:
 		err = lo_ioctl(bdev, mode, cmd, arg);
 		break;
 	default:
@@ -1781,6 +1806,7 @@
 	}
 	lo->lo_queue->queuedata = lo;
 
+	blk_queue_max_hw_sectors(lo->lo_queue, BLK_DEF_MAX_SECTORS);
 	/*
 	 * It doesn't make sense to enable merge because the I/O
 	 * submitted to backing file is handled page by page.
diff --git a/drivers/block/vs_block_client.c b/drivers/block/vs_block_client.c
new file mode 100644
index 0000000..974f8b9
--- /dev/null
+++ b/drivers/block/vs_block_client.c
@@ -0,0 +1,956 @@
+/*
+ * drivers/block/vs_block_client.c
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * block vservice client driver
+ *
+ * Function vs_block_client_vs_alloc() is partially derived from
+ * drivers/block/brd.c (brd_alloc())
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/hdreg.h>
+#include <linux/genhd.h>
+#include <linux/fs.h>
+#include <linux/bio.h>
+#include <linux/kref.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/version.h>
+#include <linux/idr.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#include <vservices/buffer.h>
+#include <vservices/protocol/block/types.h>
+#include <vservices/protocol/block/common.h>
+#include <vservices/protocol/block/client.h>
+#include <vservices/service.h>
+#include <vservices/session.h>
+#include <vservices/wait.h>
+
+/*
+ * BLK_DEF_MAX_SECTORS was replaced with the hard-coded number 1024 in 3.19,
+ * and restored in 4.3
+ */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) && \
+        (LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0))
+#define BLK_DEF_MAX_SECTORS 1024
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
+#define bio_sector(bio) (bio)->bi_iter.bi_sector
+#define bio_size(bio) (bio)->bi_iter.bi_size
+#else
+#define bio_sector(bio) (bio)->bi_sector
+#define bio_size(bio) (bio)->bi_size
+#endif
+
+#define CLIENT_BLKDEV_NAME		"vblock"
+
+#define PERDEV_MINORS 256
+
+struct block_client;
+
+struct vs_block_device {
+	/*
+	 * The client that created this block device. A reference is held
+	 * to the client until the block device is released, so this pointer
+	 * should always be valid. However, the client may have since reset,
+	 * so it should only be used if, after locking it, its blkdev pointer
+	 * points back to this block device.
+	 */
+	struct block_client		*client;
+
+	int				id;
+	struct gendisk			*disk;
+	struct request_queue		*queue;
+
+	struct kref			kref;
+};
+
+struct block_client {
+	struct vs_client_block_state	client;
+	struct vs_service_device	*service;
+
+	/* Tasklet & queue for bouncing buffers out of read acks */
+	struct tasklet_struct		rx_tasklet;
+	struct list_head		rx_queue;
+	struct spinlock			rx_queue_lock;
+
+	/*
+	 * The current virtual block device. This gets replaced when we do
+	 * a reset since other parts of the kernel (e.g. vfs) may still
+	 * be accessing the disk.
+	 */
+	struct vs_block_device		*blkdev;
+
+	/* Shared work item for disk creation */
+	struct work_struct		disk_creation_work;
+
+	struct kref			kref;
+};
+
+#define state_to_block_client(state) \
+	container_of(state, struct block_client, client)
+
+static int block_client_major;
+
+/* Unique identifier allocation for virtual block devices */
+static DEFINE_IDA(vs_block_ida);
+static DEFINE_MUTEX(vs_block_ida_lock);
+
+static int
+block_client_vs_to_linux_error(vservice_block_block_io_error_t vs_err)
+{
+	switch (vs_err) {
+	case VSERVICE_BLOCK_INVALID_INDEX:
+		return -EILSEQ;
+	case VSERVICE_BLOCK_MEDIA_FAILURE:
+		return -EIO;
+	case VSERVICE_BLOCK_MEDIA_TIMEOUT:
+		return -ETIMEDOUT;
+	case VSERVICE_BLOCK_UNSUPPORTED_COMMAND:
+		return -ENOTSUPP;
+	case VSERVICE_BLOCK_SERVICE_RESET:
+		return -ENXIO;
+	default:
+		WARN_ON(vs_err);
+		return 0;
+	}
+
+	return 0;
+}
+
+static void vs_block_client_kfree(struct kref *kref)
+{
+	struct block_client *client =
+		container_of(kref, struct block_client, kref);
+
+	vs_put_service(client->service);
+	kfree(client);
+}
+
+static void vs_block_client_put(struct block_client *client)
+{
+	kref_put(&client->kref, vs_block_client_kfree);
+}
+
+static void vs_block_device_kfree(struct kref *kref)
+{
+	struct vs_block_device *blkdev =
+		container_of(kref, struct vs_block_device, kref);
+
+	/* Delete the disk and clean up its queue */
+	del_gendisk(blkdev->disk);
+	blk_cleanup_queue(blkdev->queue);
+	put_disk(blkdev->disk);
+
+	mutex_lock(&vs_block_ida_lock);
+	ida_remove(&vs_block_ida, blkdev->id);
+	mutex_unlock(&vs_block_ida_lock);
+
+	if (blkdev->client)
+		vs_block_client_put(blkdev->client);
+
+	kfree(blkdev);
+}
+
+static void vs_block_device_put(struct vs_block_device *blkdev)
+{
+	kref_put(&blkdev->kref, vs_block_device_kfree);
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
+static void
+#else
+static int
+#endif
+vs_block_client_blkdev_release(struct gendisk *disk, fmode_t mode)
+{
+	struct vs_block_device *blkdev = disk->private_data;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
+	if (WARN_ON(!blkdev))
+		return;
+#else
+	if (WARN_ON(!blkdev))
+		return -ENXIO;
+#endif
+
+	vs_block_device_put(blkdev);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)
+	return 0;
+#endif
+}
+
+static int vs_block_client_blkdev_open(struct block_device *bdev, fmode_t mode)
+{
+	struct vs_block_device *blkdev = bdev->bd_disk->private_data;
+	struct block_client *client;
+	int err = -ENXIO;
+
+	if (!blkdev || !kref_get_unless_zero(&blkdev->kref))
+		goto fail_get_blkdev;
+
+	client = blkdev->client;
+	if (WARN_ON(!client))
+		goto fail_lock_client;
+
+	if (!vs_state_lock_safe(&client->client)) {
+		err = -ENODEV;
+		goto fail_lock_client;
+	}
+
+	if (blkdev != client->blkdev) {
+		/* The client has reset, this blkdev is no longer usable */
+		err = -ENXIO;
+		goto fail_check_client;
+	}
+
+	if ((mode & FMODE_WRITE) > 0 && client->client.readonly) {
+		dev_dbg(&client->service->dev,
+			"opening a readonly disk as writable\n");
+		err = -EROFS;
+		goto fail_check_client;
+	}
+
+	vs_state_unlock(&client->client);
+
+	return 0;
+
+fail_check_client:
+	vs_state_unlock(&client->client);
+fail_lock_client:
+	vs_block_device_put(blkdev);
+fail_get_blkdev:
+	return err;
+}
+
+static int vs_block_client_blkdev_getgeo(struct block_device *bdev,
+		struct hd_geometry *geo)
+{
+	/* These numbers are sane default values for disk geometry. */
+	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
+	geo->heads = 4;
+	geo->sectors = 16;
+
+	return 0;
+}
+
+/*
+ * Indirectly determine linux block layer sector size and ensure that our
+ * sector size matches.
+ */
+static int vs_block_client_check_sector_size(struct block_client *client,
+		struct bio *bio)
+{
+	unsigned int expected_bytes;
+
+	if (unlikely(!bio_sectors(bio))) {
+		dev_err(&client->service->dev, "zero-length bio\n");
+		return -EIO;
+	}
+
+	expected_bytes = bio_sectors(bio) * client->client.sector_size;
+	if (unlikely(bio_size(bio) != expected_bytes)) {
+		dev_err(&client->service->dev,
+				"bio has %zd bytes, which is unexpected "
+				"for %d sectors of %zd bytes each",
+				(size_t)bio_size(bio), bio_sectors(bio),
+				(size_t)client->client.sector_size);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static const struct block_device_operations block_client_ops = {
+	.getgeo		= vs_block_client_blkdev_getgeo,
+	.open		= vs_block_client_blkdev_open,
+	.release	= vs_block_client_blkdev_release,
+	.owner		= THIS_MODULE,
+};
+
+static int block_client_send_write_req(struct block_client *client,
+		struct bio *bio)
+{
+	struct vs_client_block_state *state = &client->client;
+	struct vs_mbuf *mbuf;
+	struct vs_pbuf pbuf;
+	struct bio_vec *bvec;
+	int err;
+	bool flush, nodelay, commit;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
+	struct bvec_iter iter;
+	struct bio_vec bvec_local;
+#else
+	int i;
+#endif
+
+	err = vs_block_client_check_sector_size(client, bio);
+	if (err < 0)
+		goto fail;
+
+	do {
+		/* Wait until it's possible to send a write request */
+		err = vs_wait_state_nointr(state,
+				vs_client_block_io_req_write_can_send(state));
+		if (err == -ECANCELED)
+			err = -ENXIO;
+		if (err < 0)
+			goto fail;
+
+		/* Wait for quota, while sending a write remains possible */
+		mbuf = vs_wait_alloc_nointr(state,
+				vs_client_block_io_req_write_can_send(state),
+				vs_client_block_io_alloc_req_write(
+					state, &pbuf, GFP_KERNEL));
+		err = IS_ERR(mbuf) ? PTR_ERR(mbuf) : 0;
+
+		/* Retry if sending is no longer possible */
+	} while (err == -ECANCELED);
+
+	if (err < 0)
+		goto fail;
+
+	vs_pbuf_resize(&pbuf, 0);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
+	bvec = &bvec_local;
+	bio_for_each_segment(bvec_local, bio, iter)
+#else
+	bio_for_each_segment(bvec, bio, i)
+#endif
+	{
+		unsigned long flags;
+		void *buf = bvec_kmap_irq(bvec, &flags);
+		flush_kernel_dcache_page(bvec->bv_page);
+		err = vs_pbuf_append(&pbuf, buf, bvec->bv_len);
+		bvec_kunmap_irq(buf, &flags);
+		if (err < 0) {
+			dev_err(&client->service->dev,
+				"pbuf copy failed with err %d\n", err);
+			err = -EIO;
+			goto fail_free_write;
+		}
+	}
+
+	if (unlikely(vs_pbuf_size(&pbuf) != bio_size(bio))) {
+		dev_err(&client->service->dev,
+			"pbuf size is wrong: %zd, should be %zd\n",
+			vs_pbuf_size(&pbuf), (size_t)bio_size(bio));
+		err = -EIO;
+		goto fail_free_write;
+	}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0)
+	flush = (bio_flags(bio) & REQ_PREFLUSH);
+	commit = (bio_flags(bio) & REQ_FUA);
+	nodelay = (bio_flags(bio) & REQ_SYNC);
+#else
+	flush = (bio->bi_rw & REQ_FLUSH);
+	commit = (bio->bi_rw & REQ_FUA);
+	nodelay = (bio->bi_rw & REQ_SYNC);
+#endif
+	err = vs_client_block_io_req_write(state, bio, bio_sector(bio),
+			bio_sectors(bio), nodelay, flush, commit, pbuf, mbuf);
+
+	if (err) {
+		dev_err(&client->service->dev,
+				"write req failed with err %d\n", err);
+		goto fail_free_write;
+	}
+
+	return 0;
+
+fail_free_write:
+	vs_client_block_io_free_req_write(state, &pbuf, mbuf);
+fail:
+	return err;
+}
+
+static int block_client_send_read_req(struct block_client *client,
+		struct bio *bio)
+{
+	struct vs_client_block_state *state = &client->client;
+	int err;
+	bool flush, nodelay;
+
+	err = vs_block_client_check_sector_size(client, bio);
+	if (err < 0)
+		return err;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0)
+	flush = (bio_flags(bio) & REQ_PREFLUSH);
+	nodelay = (bio_flags(bio) & REQ_SYNC);
+#else
+	flush = (bio->bi_rw & REQ_FLUSH);
+	nodelay = (bio->bi_rw & REQ_SYNC);
+#endif
+	do {
+		/* Wait until it's possible to send a read request */
+		err = vs_wait_state_nointr(state,
+				vs_client_block_io_req_read_can_send(state));
+		if (err == -ECANCELED)
+			err = -ENXIO;
+		if (err < 0)
+			break;
+
+		/* Wait for quota, while sending a read remains possible */
+		err = vs_wait_send_nointr(state,
+			vs_client_block_io_req_read_can_send(state),
+			vs_client_block_io_req_read(state, bio,
+				bio_sector(bio), bio_sectors(bio),
+				nodelay, flush, GFP_KERNEL));
+	} while (err == -ECANCELED);
+
+	return err;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
+static blk_qc_t
+#else
+static void
+#endif
+vs_block_client_make_request(struct request_queue *q, struct bio *bio)
+{
+	struct block_device *bdev = bio->bi_bdev;
+	struct vs_block_device *blkdev = bdev->bd_disk->private_data;
+	struct block_client *client;
+	int err = 0;
+
+	client = blkdev->client;
+	if (!client || !kref_get_unless_zero(&client->kref)) {
+		err = -ENODEV;
+		goto fail_get_client;
+	}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
+	blk_queue_split(q, &bio, q->bio_split);
+#endif
+
+	if (!vs_state_lock_safe(&client->client)) {
+		err = -ENODEV;
+		goto fail_lock_client;
+	}
+
+	if (client->blkdev != blkdev) {
+		/* Client has reset, this block device is no longer usable */
+		err = -EIO;
+		goto fail_check_client;
+	}
+
+	if (bio_data_dir(bio) == WRITE)
+		err = block_client_send_write_req(client, bio);
+	else
+		err = block_client_send_read_req(client, bio);
+
+fail_check_client:
+	if (err == -ENOLINK)
+		err = -EIO;
+	else
+		vs_state_unlock(&client->client);
+fail_lock_client:
+	vs_block_client_put(client);
+fail_get_client:
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
+	if (err < 0) {
+		bio->bi_error = err;
+		bio_endio(bio);
+	}
+#else
+	if (err < 0)
+		bio_endio(bio, err);
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
+	return BLK_QC_T_NONE;
+#endif
+}
+
+static int vs_block_client_get_blkdev_id(struct block_client *client)
+{
+	int id;
+	int ret;
+
+retry:
+	ret = ida_pre_get(&vs_block_ida, GFP_KERNEL);
+	if (ret == 0)
+		return -ENOMEM;
+
+	mutex_lock(&vs_block_ida_lock);
+	ret = ida_get_new(&vs_block_ida, &id);
+	mutex_unlock(&vs_block_ida_lock);
+
+	if (ret == -EAGAIN)
+		goto retry;
+
+	return id;
+}
+
+static int vs_block_client_disk_add(struct block_client *client)
+{
+	struct vs_block_device *blkdev;
+	unsigned int max_hw_sectors;
+	int err;
+
+	dev_dbg(&client->service->dev, "device add\n");
+
+	blkdev = kzalloc(sizeof(*blkdev), GFP_KERNEL);
+	if (!blkdev) {
+		err = -ENOMEM;
+		goto fail;
+	}
+
+	kref_init(&blkdev->kref);
+	blkdev->id = vs_block_client_get_blkdev_id(client);
+	if (blkdev->id < 0) {
+		err = blkdev->id;
+		goto fail_free_blkdev;
+	}
+
+	if ((blkdev->id * PERDEV_MINORS) >> MINORBITS) {
+		err = -ENODEV;
+		goto fail_remove_ida;
+	}
+
+	blkdev->queue = blk_alloc_queue(GFP_KERNEL);
+	if (!blkdev->queue) {
+		dev_err(&client->service->dev,
+				"Error initializing blk queue\n");
+		err = -ENOMEM;
+		goto fail_remove_ida;
+	}
+
+	blk_queue_make_request(blkdev->queue, vs_block_client_make_request);
+	blk_queue_bounce_limit(blkdev->queue, BLK_BOUNCE_ANY);
+	blk_queue_dma_alignment(blkdev->queue, 0);
+
+	/*
+	 * Mark this as a paravirtualised device. This is just an alias
+	 * of QUEUE_FLAG_NONROT, which prevents the I/O schedulers trying
+	 * to wait for the disk to spin.
+	 */
+	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, blkdev->queue);
+
+	blkdev->queue->queuedata = blkdev;
+
+	blkdev->client = client;
+	kref_get(&client->kref);
+
+	max_hw_sectors = min_t(sector_t, BLK_DEF_MAX_SECTORS,
+			client->client.segment_size /
+			client->client.sector_size);
+	blk_queue_max_hw_sectors(blkdev->queue, max_hw_sectors);
+
+	blkdev->disk = alloc_disk(PERDEV_MINORS);
+	if (!blkdev->disk) {
+		dev_err(&client->service->dev, "Error allocating disk\n");
+		err = -ENOMEM;
+		goto fail_free_blk_queue;
+	}
+
+	if (client->client.readonly) {
+		dev_dbg(&client->service->dev, "set device as readonly\n");
+		set_disk_ro(blkdev->disk, true);
+	}
+
+	blkdev->disk->major = block_client_major;
+	blkdev->disk->first_minor = blkdev->id * PERDEV_MINORS;
+	blkdev->disk->fops         = &block_client_ops;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+	blkdev->disk->driverfs_dev = &client->service->dev;
+#endif
+	blkdev->disk->private_data = blkdev;
+	blkdev->disk->queue        = blkdev->queue;
+	blkdev->disk->flags       |= GENHD_FL_EXT_DEVT;
+
+	/*
+	 * The block device name is vblock<x>, where x is a unique
+	 * identifier. Userspace should rename or symlink the device
+	 * appropriately, typically by processing the add uevent.
+	 *
+	 * If a virtual block device is reset then it may re-open with a
+	 * different identifier if something still holds a reference to
+	 * the old device (such as a userspace application having an open
+	 * file handle).
+	 */
+	snprintf(blkdev->disk->disk_name, sizeof(blkdev->disk->disk_name),
+			"%s%d", CLIENT_BLKDEV_NAME, blkdev->id);
+	set_capacity(blkdev->disk, client->client.device_sectors);
+
+	/*
+	 * We need to hold a reference on blkdev across add_disk(), to make
+	 * sure a concurrent reset does not immediately release the blkdev
+	 * and call del_gendisk().
+	 */
+	kref_get(&blkdev->kref);
+
+	vs_service_state_lock(client->service);
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(client->client.state.base)) {
+		vs_service_state_unlock(client->service);
+		err = -ENXIO;
+		goto fail_free_blk_queue;
+	}
+	client->blkdev = blkdev;
+	vs_service_state_unlock(client->service);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	device_add_disk(&client->service->dev, blkdev->disk);
+#else
+	add_disk(blkdev->disk);
+#endif
+	dev_dbg(&client->service->dev, "added block disk '%s'\n",
+			blkdev->disk->disk_name);
+
+	/* Release the reference taken above. */
+	vs_block_device_put(blkdev);
+
+	return 0;
+
+fail_free_blk_queue:
+	blk_cleanup_queue(blkdev->queue);
+fail_remove_ida:
+	mutex_lock(&vs_block_ida_lock);
+	ida_remove(&vs_block_ida, blkdev->id);
+	mutex_unlock(&vs_block_ida_lock);
+fail_free_blkdev:
+	kfree(blkdev);
+fail:
+	return err;
+}
+
+static void vs_block_client_disk_creation_work(struct work_struct *work)
+{
+	struct block_client *client = container_of(work,
+			struct block_client, disk_creation_work);
+	struct vs_block_device *blkdev;
+	bool running;
+
+	vs_service_state_lock(client->service);
+	blkdev = client->blkdev;
+	running = VSERVICE_BASE_STATE_IS_RUNNING(client->client.state.base);
+
+	dev_dbg(&client->service->dev,
+			"disk changed: blkdev = %pK, running = %d\n",
+			client->blkdev, running);
+	if (!blkdev && running) {
+		dev_dbg(&client->service->dev, "adding block disk\n");
+		vs_service_state_unlock(client->service);
+		vs_block_client_disk_add(client);
+	} else {
+		vs_service_state_unlock(client->service);
+	}
+}
+
+static void vs_block_client_rx_tasklet(unsigned long data);
+
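+/*
+ * Allocate the per-service client state. The RX bounce tasklet is created
+ * disabled here; it is enabled in the opened() callback once the service is
+ * running, and disabled again in closed().
+ */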
+static struct vs_client_block_state *
+vs_block_client_alloc(struct vs_service_device *service)
+{
+	struct block_client *client;
+
+	client = kzalloc(sizeof(*client), GFP_KERNEL);
+	if (!client) {
+		dev_err(&service->dev, "Error allocating client struct\n");
+		return NULL;
+	}
+
+	vs_get_service(service);
+	client->service = service;
+
+	INIT_LIST_HEAD(&client->rx_queue);
+	spin_lock_init(&client->rx_queue_lock);
+	tasklet_init(&client->rx_tasklet, vs_block_client_rx_tasklet,
+			(unsigned long)client);
+	tasklet_disable(&client->rx_tasklet);
+
+	INIT_WORK(&client->disk_creation_work,
+			vs_block_client_disk_creation_work);
+	kref_init(&client->kref);
+
+	dev_dbg(&service->dev, "New block client %pK\n", client);
+
+	return &client->client;
+}
+
+static void vs_block_client_release(struct vs_client_block_state *state)
+{
+	struct block_client *client = state_to_block_client(state);
+
+	flush_work(&client->disk_creation_work);
+
+	vs_block_client_put(client);
+}
+
+/* FIXME: Jira ticket SDK-2459 - anjaniv */
+static void vs_block_client_closed(struct vs_client_block_state *state)
+{
+	struct block_client *client = state_to_block_client(state);
+
+	/*
+	 * Stop the RX bounce tasklet and clean up its queue. We can safely
+	 * wait for it to stop because it doesn't need to acquire the state
+	 * lock, only the RX lock, which we acquire after it is disabled.
+	 */
+	tasklet_disable(&client->rx_tasklet);
+	spin_lock(&client->rx_queue_lock);
+	while (!list_empty(&client->rx_queue)) {
+		struct vs_mbuf *mbuf = list_first_entry(&client->rx_queue,
+				struct vs_mbuf, queue);
+		struct vs_pbuf pbuf;
+		list_del(&mbuf->queue);
+		vs_client_block_io_getbufs_ack_read(state, &pbuf, mbuf);
+		vs_client_block_io_free_ack_read(state, &pbuf, mbuf);
+	}
+	spin_unlock(&client->rx_queue_lock);
+
+	if (client->blkdev) {
+		struct vs_block_device *blkdev = client->blkdev;
+		char service_remove[] = "REMOVING_SERVICE=1";
+		/* + 9 because "DEVNAME=" is 8 chars plus 1 for '\0' */
+		char devname[sizeof(blkdev->disk->disk_name) + 9];
+		char *envp[] = { service_remove, devname, NULL };
+
+		dev_dbg(&client->service->dev, "removing block disk\n");
+
+		/*
+		 * Send a change event with DEVNAME to allow the block helper
+		 * script to remove any server sessions which use either
+		 * v${SERVICE_NAME} or ${DEVNAME}.  The remove event generated
+		 * by the session driver doesn't include DEVNAME so the only
+		 * way for userspace to map SERVICE_NAME to DEVNAME is by the
+		 * symlink added when the client service was created.  If that
+		 * symlink has been deleted, there's no other way to connect
+		 * the two names.
+		 */
+		snprintf(devname, sizeof(devname), "DEVNAME=%s",
+				blkdev->disk->disk_name);
+		kobject_uevent_env(&client->service->dev.kobj, KOBJ_CHANGE,
+				envp);
+
+		/*
+		 * We are done with the device now. The block device will only
+		 * get removed once there are no more users (e.g. userspace
+		 * applications).
+		 */
+		client->blkdev = NULL;
+		vs_block_device_put(blkdev);
+	}
+}
+
+static void vs_block_client_opened(struct vs_client_block_state *state)
+{
+	struct block_client *client = state_to_block_client(state);
+
+#if !defined(CONFIG_LBDAF) && !defined(CONFIG_64BIT)
+	if (state->device_sectors >> (sizeof(sector_t) * 8)) {
+		dev_err(&client->service->dev,
+				"Client doesn't support full capacity large block devices\n");
+		vs_client_block_close(state);
+		return;
+	}
+#endif
+
+	/* Unblock the RX bounce tasklet. */
+	tasklet_enable(&client->rx_tasklet);
+
+	/*
+	 * The block device allocation needs to sleep, so we defer it to a
+	 * work queue.
+	 */
+	queue_work(client->service->work_queue, &client->disk_creation_work);
+}
+
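+/*
+ * Copy the read payload from the pbuf into each segment of the original bio,
+ * then free the ack buffer and complete the bio. Called from the RX bounce
+ * tasklet, so the copy happens outside the protocol message handler.
+ */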
+static int vs_block_client_ack_read(struct vs_client_block_state *state,
+		void *tag, struct vs_pbuf pbuf, struct vs_mbuf *mbuf)
+{
+	struct block_client *client = state_to_block_client(state);
+	struct bio *bio = tag;
+	struct bio_vec *bvec;
+	int err = 0;
+	size_t bytes_read = 0;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
+	struct bio_vec bvec_local;
+	struct bvec_iter iter;
+#else
+	int i;
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
+	bvec = &bvec_local;
+	bio_for_each_segment(bvec_local, bio, iter)
+#else
+	bio_for_each_segment(bvec, bio, i)
+#endif
+	{
+		unsigned long flags;
+		void *buf;
+		if (vs_pbuf_size(&pbuf) < bytes_read + bvec->bv_len) {
+			dev_err(&client->service->dev,
+					"bio read overrun: %zu into %zu byte response, but need %zd bytes\n",
+					bytes_read, vs_pbuf_size(&pbuf),
+					(size_t)bvec->bv_len);
+			err = -EIO;
+			break;
+		}
+		buf = bvec_kmap_irq(bvec, &flags);
+		memcpy(buf, vs_pbuf_data(&pbuf) + bytes_read, bvec->bv_len);
+		flush_kernel_dcache_page(bvec->bv_page);
+		bvec_kunmap_irq(buf, &flags);
+		bytes_read += bvec->bv_len;
+	}
+
+	vs_client_block_io_free_ack_read(state, &pbuf, mbuf);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
+	if (err < 0)
+		bio->bi_error = err;
+	bio_endio(bio);
+#else
+	bio_endio(bio, err);
+#endif
+
+	return 0;
+}
+
+static void vs_block_client_rx_tasklet(unsigned long data)
+{
+	struct block_client *client = (struct block_client *)data;
+	struct vs_mbuf *mbuf;
+	struct vs_pbuf pbuf;
+
+	spin_lock(&client->rx_queue_lock);
+
+	/* The list shouldn't be empty. */
+	if (WARN_ON(list_empty(&client->rx_queue))) {
+		spin_unlock(&client->rx_queue_lock);
+		return;
+	}
+
+	/* Get the next mbuf, and reschedule ourselves if there are more. */
+	mbuf = list_first_entry(&client->rx_queue, struct vs_mbuf, queue);
+	list_del(&mbuf->queue);
+	if (!list_empty(&client->rx_queue))
+		tasklet_schedule(&client->rx_tasklet);
+
+	spin_unlock(&client->rx_queue_lock);
+
+	/* Process the ack. */
+	vs_client_block_io_getbufs_ack_read(&client->client, &pbuf, mbuf);
+	vs_block_client_ack_read(&client->client, mbuf->priv, pbuf, mbuf);
+}
+
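+/*
+ * ack_read handler registered with the protocol layer. No copying is done
+ * here; the mbuf is queued and the actual data copy is deferred to the RX
+ * bounce tasklet.
+ */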
+static int vs_block_client_queue_ack_read(struct vs_client_block_state *state,
+		void *tag, struct vs_pbuf pbuf, struct vs_mbuf *mbuf)
+{
+	struct block_client *client = state_to_block_client(state);
+
+	spin_lock(&client->rx_queue_lock);
+	list_add_tail(&mbuf->queue, &client->rx_queue);
+	mbuf->priv = tag;
+	spin_unlock(&client->rx_queue_lock);
+
+	tasklet_schedule(&client->rx_tasklet);
+
+	wake_up(&state->service->quota_wq);
+
+	return 0;
+}
+
+static int vs_block_client_ack_write(struct vs_client_block_state *state,
+		void *tag)
+{
+	struct bio *bio = tag;
+
+	if (WARN_ON(!bio))
+		return -EPROTO;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
+	bio_endio(bio);
+#else
+	bio_endio(bio, 0);
+#endif
+
+	wake_up(&state->service->quota_wq);
+
+	return 0;
+}
+
+static int vs_block_client_nack_io(struct vs_client_block_state *state,
+		void *tag, vservice_block_block_io_error_t err)
+{
+	struct bio *bio = tag;
+
+	if (WARN_ON(!bio))
+		return -EPROTO;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
+	bio->bi_error = block_client_vs_to_linux_error(err);
+	bio_endio(bio);
+#else
+	bio_endio(bio, block_client_vs_to_linux_error(err));
+#endif
+
+	wake_up(&state->service->quota_wq);
+
+	return 0;
+}
+
+static struct vs_client_block block_client_driver = {
+	.rx_atomic		= true,
+	.alloc			= vs_block_client_alloc,
+	.release		= vs_block_client_release,
+	.opened			= vs_block_client_opened,
+	.closed			= vs_block_client_closed,
+	.io = {
+		.ack_read	= vs_block_client_queue_ack_read,
+		.nack_read	= vs_block_client_nack_io,
+		.ack_write	= vs_block_client_ack_write,
+		.nack_write	= vs_block_client_nack_io,
+	}
+};
+
+static int __init vs_block_client_init(void)
+{
+	int err;
+
+	block_client_major = register_blkdev(0, CLIENT_BLKDEV_NAME);
+	if (block_client_major < 0) {
+		pr_err("Error registering block device major number\n");
+		err = block_client_major;
+		goto fail;
+	}
+
+	err = vservice_block_client_register(&block_client_driver,
+			"block_client_driver");
+	if (err)
+		goto fail_unregister_blkdev;
+
+	return 0;
+
+fail_unregister_blkdev:
+	unregister_blkdev(block_client_major, CLIENT_BLKDEV_NAME);
+fail:
+	return err;
+}
+
+static void __exit vs_block_client_exit(void)
+{
+	vservice_block_client_unregister(&block_client_driver);
+	unregister_blkdev(block_client_major, CLIENT_BLKDEV_NAME);
+}
+
+module_init(vs_block_client_init);
+module_exit(vs_block_client_exit);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Block Client Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/block/vs_block_server.c b/drivers/block/vs_block_server.c
new file mode 100644
index 0000000..9d20f6a
--- /dev/null
+++ b/drivers/block/vs_block_server.c
@@ -0,0 +1,1179 @@
+/*
+ * drivers/block/vs_block_server.c
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * block vservice server driver
+ *
+ */
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/genhd.h>
+#include <linux/fs.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/protocol/block/types.h>
+#include <vservices/protocol/block/common.h>
+#include <vservices/protocol/block/server.h>
+#include <vservices/protocol/block/client.h>
+#include <vservices/service.h>
+#include <vservices/wait.h>
+
+#define VS_BLOCK_BLKDEV_DEFAULT_MODE FMODE_READ
+#define VS_BLOCK_BLK_DEF_SECTOR_SIZE 512
+
+/*
+ * Metadata for a request. Note that the bio must be embedded at the end of
+ * this structure, because it is allocated from a bioset.
+ */
+struct block_server_request {
+	struct block_server	*server;
+	u32			tagid;
+	u32			size;
+	int			op_err;
+	struct list_head	list;
+	struct vs_pbuf		pbuf;
+	struct vs_mbuf		*mbuf;
+	bool			bounced;
+	bool			submitted;
+
+	struct bio		bio;
+};
+
+struct block_server {
+	struct vs_server_block_state	server;
+	struct vs_service_device	*service;
+
+	struct block_device		*bdev;
+	struct bio_set			*bioset;
+
+	unsigned int			sector_size;
+	bool				started;
+
+	/* Bounced writes are deferred to keep memcpy off service queue */
+	struct list_head		bounce_req_queue;
+	struct work_struct		bounce_req_work;
+	spinlock_t			bounce_req_lock;
+
+	/* Count of outstanding requests submitted to block layer */
+	atomic_t			submitted_req_count;
+	wait_queue_head_t		submitted_req_wq;
+
+	/* Completions are deferred because end_io may be in atomic context */
+	struct list_head		completed_req_queue;
+	struct work_struct		completed_req_work;
+	spinlock_t			completed_req_lock;
+};
+
+#define state_to_block_server(state) \
+	container_of(state, struct block_server, server)
+
+#define dev_to_block_server(dev) \
+	state_to_block_server(dev_get_drvdata(dev))
+
+static inline vservice_block_block_io_error_t
+block_server_linux_to_vs_error(int err)
+{
+	/*
+	 * This list is not exhaustive. For all other errors, we return
+	 * unsupported_command.
+	 */
+	switch (err) {
+	case -ECOMM:
+	case -EIO:
+	case -ENOMEM:
+		return VSERVICE_BLOCK_MEDIA_FAILURE;
+	case -ETIME:
+	case -ETIMEDOUT:
+		return VSERVICE_BLOCK_MEDIA_TIMEOUT;
+	case -EILSEQ:
+		return VSERVICE_BLOCK_INVALID_INDEX;
+	default:
+		if (err)
+			return VSERVICE_BLOCK_UNSUPPORTED_COMMAND;
+		return 0;
+	}
+
+	return 0;
+}
+
+static inline u32 vs_req_num_sectors(struct block_server *server,
+		struct block_server_request *req)
+{
+	return req->size / server->sector_size;
+}
+
+static inline u64 vs_req_sector_index(struct block_server_request *req)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)
+	return req->bio.bi_iter.bi_sector;
+#else
+	return req->bio.bi_sector;
+#endif
+}
+
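+/*
+ * Tear down all in-flight I/O when the service closes: fail requests still
+ * waiting for a bounce buffer, wait for requests already submitted to the
+ * block layer, then discard any completions that were never sent.
+ */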
+static void vs_block_server_closed(struct vs_server_block_state *state)
+{
+	struct block_server *server = state_to_block_server(state);
+	struct block_server_request *req;
+
+	/*
+	 * Fail all requests that haven't been sent to the block layer yet.
+	 */
+	spin_lock(&server->bounce_req_lock);
+	while (!list_empty(&server->bounce_req_queue)) {
+		req = list_first_entry(&server->bounce_req_queue,
+				struct block_server_request, list);
+		list_del(&req->list);
+		spin_unlock(&server->bounce_req_lock);
+		bio_io_error(&req->bio);
+		spin_lock(&server->bounce_req_lock);
+	}
+	spin_unlock(&server->bounce_req_lock);
+
+	/*
+	 * Wait until all outstanding requests to the block layer are
+	 * complete.
+	 */
+	wait_event(server->submitted_req_wq,
+			!atomic_read(&server->submitted_req_count));
+
+	/*
+	 * Discard all the completed requests.
+	 */
+	spin_lock_irq(&server->completed_req_lock);
+	while (!list_empty(&server->completed_req_queue)) {
+		req = list_first_entry(&server->completed_req_queue,
+				struct block_server_request, list);
+		list_del(&req->list);
+		if (req->mbuf) {
+			spin_unlock_irq(&server->completed_req_lock);
+			if (bio_data_dir(&req->bio) == WRITE)
+				vs_server_block_io_free_req_write(state,
+						&req->pbuf, req->mbuf);
+			else
+				vs_server_block_io_free_ack_read(state,
+						&req->pbuf, req->mbuf);
+			spin_lock_irq(&server->completed_req_lock);
+		}
+		bio_put(&req->bio);
+	}
+	spin_unlock_irq(&server->completed_req_lock);
+}
+
+static ssize_t
+vs_block_server_readonly_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct block_server *server = dev_to_block_server(dev);
+	int err;
+	unsigned long val;
+
+	vs_service_state_lock(server->service);
+	if (server->started) {
+		err = -EBUSY;
+		goto unlock;
+	}
+
+	err = kstrtoul(buf, 0, &val);
+	if (err)
+		goto unlock;
+
+	if (bdev_read_only(server->bdev) && !val) {
+		dev_info(dev,
+				"Cannot set %s to read/write: read-only device\n",
+				server->service->name);
+		err = -EINVAL;
+		goto unlock;
+	}
+
+	server->server.readonly = val;
+	err = count;
+
+unlock:
+	vs_service_state_unlock(server->service);
+
+	return err;
+}
+
+static ssize_t
+vs_block_server_readonly_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct block_server *server = dev_to_block_server(dev);
+	int cnt;
+
+	vs_service_state_lock(server->service);
+	cnt = scnprintf(buf, PAGE_SIZE, "%d\n", server->server.readonly);
+	vs_service_state_unlock(server->service);
+
+	return cnt;
+}
+
+static ssize_t
+vs_block_server_start_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct block_server *server = dev_to_block_server(dev);
+	int err;
+	unsigned long val;
+
+	vs_service_state_lock(server->service);
+
+	err = kstrtoul(buf, 0, &val);
+	if (err)
+		goto unlock;
+
+	if (!val && server->started) {
+		err = -EBUSY;
+		goto unlock;
+	}
+
+	if (val && !server->started) {
+		server->started = true;
+
+		if (server->server.state.base.statenum ==
+				VSERVICE_BASE_STATE_CLOSED__OPEN)
+			vs_server_block_open_complete(&server->server,
+					VS_SERVER_RESP_SUCCESS);
+	}
+
+	err = count;
+unlock:
+	vs_service_state_unlock(server->service);
+
+	return err;
+}
+
+static ssize_t
+vs_block_server_start_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct block_server *server = dev_to_block_server(dev);
+	int cnt;
+
+	vs_service_state_lock(server->service);
+	cnt = scnprintf(buf, PAGE_SIZE, "%d\n", server->started);
+	vs_service_state_unlock(server->service);
+
+	return cnt;
+}
+
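+/*
+ * Sysfs attributes: "readonly" toggles the exported device between read-only
+ * and read/write (only while the service is stopped), and "start" marks the
+ * server as started, completing any pending open request.
+ */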
+static DEVICE_ATTR(start, S_IWUSR | S_IRUSR, vs_block_server_start_show,
+	vs_block_server_start_store);
+static DEVICE_ATTR(readonly, S_IWUSR | S_IRUSR, vs_block_server_readonly_show,
+	vs_block_server_readonly_store);
+
+static struct attribute *vs_block_server_dev_attrs[] = {
+	&dev_attr_start.attr,
+	&dev_attr_readonly.attr,
+	NULL,
+};
+
+static const struct attribute_group vs_block_server_attr_group = {
+	.attrs = vs_block_server_dev_attrs
+};
+
+/*
+ * Invoked by vs_server_block_handle_req_open() after an open request is
+ * received, to perform server-specific initialisation.
+ *
+ * The "delayed start" feature is enforced here: the open is completed
+ * immediately only if the server has already been started via sysfs;
+ * otherwise the response is deferred until userspace writes to "start".
+ */
+static vs_server_response_type_t
+vs_block_server_open(struct vs_server_block_state *_state)
+{
+	struct block_server *server = state_to_block_server(_state);
+
+	return (server->started) ? VS_SERVER_RESP_SUCCESS :
+				   VS_SERVER_RESP_EXPLICIT_COMPLETE;
+}
+
+static int
+vs_block_server_complete_req_read(struct block_server_request *req)
+{
+	struct block_server *server = req->server;
+	struct vs_server_block_state *state = &server->server;
+	int err = -EIO;
+
+	if (req->op_err) {
+		err = req->op_err;
+		dev_dbg(&server->service->dev,
+				"read nack, err %d sector 0x%llx num 0x%x\n",
+				err, vs_req_sector_index(req),
+				vs_req_num_sectors(server, req));
+
+		if (req->mbuf)
+			vs_server_block_io_free_ack_read(state, &req->pbuf,
+					req->mbuf);
+
+		err = vs_server_block_io_send_nack_read(state, req->tagid,
+				block_server_linux_to_vs_error(err),
+				GFP_KERNEL);
+	} else {
+		if (req->bounced && !req->mbuf) {
+			req->mbuf = vs_server_block_io_alloc_ack_read(
+					&server->server, &req->pbuf,
+					GFP_KERNEL);
+			if (IS_ERR(req->mbuf)) {
+				err = PTR_ERR(req->mbuf);
+				req->mbuf = NULL;
+			}
+		}
+
+		if (req->bounced && req->mbuf) {
+			int i;
+			struct bio_vec *bv;
+			void *data = req->pbuf.data;
+
+			if (vs_pbuf_resize(&req->pbuf, req->size) < 0) {
+				bio_io_error(&req->bio);
+				return 0;
+			}
+
+			bio_for_each_segment_all(bv, &req->bio, i) {
+				memcpy(data, page_address(bv->bv_page) +
+						bv->bv_offset, bv->bv_len);
+				data += bv->bv_len;
+				__free_page(bv->bv_page);
+			}
+			req->bounced = false;
+		}
+
+		if (req->mbuf) {
+			dev_vdbg(&server->service->dev,
+					"read ack, sector 0x%llx num 0x%x\n",
+					vs_req_sector_index(req),
+					vs_req_num_sectors(server, req));
+
+			err = vs_server_block_io_send_ack_read(state,
+					req->tagid, req->pbuf, req->mbuf);
+
+			if (err && (err != -ENOBUFS)) {
+				vs_server_block_io_free_ack_read(state,
+						&req->pbuf, req->mbuf);
+				req->mbuf = NULL;
+			}
+		} else {
+			WARN_ON(!err || !req->bounced);
+		}
+	}
+
+	if (err && (err != -ENOBUFS))
+		dev_dbg(&server->service->dev,
+				"error %d sending read reply\n", err);
+	else if (err == -ENOBUFS)
+		dev_vdbg(&server->service->dev, "out of quota, will retry\n");
+
+	return err;
+}
+
+static int
+vs_block_server_complete_req_write(struct block_server_request *req)
+{
+	struct block_server *server = req->server;
+	struct vs_server_block_state *state = &server->server;
+	int err;
+
+	WARN_ON(req->mbuf);
+
+	if (req->op_err) {
+		dev_dbg(&server->service->dev,
+				"write nack, err %d sector 0x%llx num 0x%x\n",
+				req->op_err, vs_req_sector_index(req),
+				vs_req_num_sectors(server, req));
+
+		err = vs_server_block_io_send_nack_write(state, req->tagid,
+				block_server_linux_to_vs_error(req->op_err),
+				GFP_KERNEL);
+	} else {
+		dev_vdbg(&server->service->dev,
+				"write ack, sector 0x%llx num 0x%x\n",
+				vs_req_sector_index(req),
+				vs_req_num_sectors(server, req));
+
+		err = vs_server_block_io_send_ack_write(state, req->tagid,
+				GFP_KERNEL);
+	}
+
+	if (err && (err != -ENOBUFS))
+		dev_dbg(&server->service->dev,
+				"error %d sending write reply\n", err);
+	else if (err == -ENOBUFS)
+		dev_vdbg(&server->service->dev, "out of quota, will retry\n");
+
+	return err;
+}
+
+static int vs_block_server_complete_req(struct block_server *server,
+		struct block_server_request *req)
+{
+	int err;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)
+	req->bio.bi_iter.bi_idx = 0;
+#else
+	req->bio.bi_idx = 0;
+#endif
+	if (!vs_state_lock_safe(&server->server))
+		return -ENOLINK;
+
+	if (bio_data_dir(&req->bio) == WRITE)
+		err = vs_block_server_complete_req_write(req);
+	else
+		err = vs_block_server_complete_req_read(req);
+
+	vs_state_unlock(&server->server);
+
+	if (err == -ENOBUFS)
+		dev_vdbg(&server->service->dev, "bio %pK response out of quota, will retry\n", &req->bio);
+
+	return err;
+}
+
+static void vs_block_server_complete_requests_work(struct work_struct *work)
+{
+	struct block_server *server = container_of(work, struct block_server,
+			completed_req_work);
+	struct block_server_request *req;
+
+	vs_service_send_batch_start(server->service, false);
+
+	/*
+	 * Send ack/nack responses for each completed request. If a request
+	 * cannot be sent because we are over-quota then this function will
+	 * return with a non-empty list, and the tx_ready handler will
+	 * reschedule us when we are back under quota. In all other cases
+	 * this function will return with an empty list.
+	 */
+	spin_lock_irq(&server->completed_req_lock);
+	while (!list_empty(&server->completed_req_queue)) {
+		int err;
+		req = list_first_entry(&server->completed_req_queue,
+				struct block_server_request, list);
+		dev_vdbg(&server->service->dev, "complete bio %pK\n", &req->bio);
+		list_del(&req->list);
+		spin_unlock_irq(&server->completed_req_lock);
+
+		err = vs_block_server_complete_req(server, req);
+		if (err == -ENOBUFS) {
+			dev_vdbg(&server->service->dev, "defer bio %pK\n", &req->bio);
+			/*
+			 * Couldn't send the completion; re-queue the request
+			 * and exit. We'll start again when more quota becomes
+			 * available.
+			 */
+			spin_lock_irq(&server->completed_req_lock);
+			list_add_tail(&req->list,
+					&server->completed_req_queue);
+			break;
+		}
+
+		dev_vdbg(&server->service->dev, "free bio %pK err %d\n", &req->bio, err);
+		bio_put(&req->bio);
+
+		spin_lock_irq(&server->completed_req_lock);
+	}
+	spin_unlock_irq(&server->completed_req_lock);
+
+	vs_service_send_batch_end(server->service, true);
+}
+
+static int vs_block_server_tx_ready(struct vs_server_block_state *state)
+{
+	struct block_server *server = state_to_block_server(state);
+
+	schedule_work(&server->completed_req_work);
+
+	return 0;
+}
+
+static bool vs_block_can_map_pbuf(struct request_queue *q,
+		struct vs_pbuf *pbuf, size_t size)
+{
+	/* The pbuf must satisfy the driver's alignment requirements. */
+	if (!blk_rq_aligned(q, (unsigned long)pbuf->data, size))
+		return false;
+
+	/*
+	 * bios can only contain pages. Sometimes the pbuf is in an I/O
+	 * region that has no struct page (e.g. a channel primary buffer),
+	 * in which case we can't map it into a bio.
+	 */
+	/* FIXME: Redmine issue #930 - philip. */
+	if (!pfn_valid(__pa(pbuf->data) >> PAGE_SHIFT))
+		return false;
+
+	return true;
+}
+
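+/*
+ * Map a physically contiguous pbuf into the bio one page at a time, starting
+ * at the pbuf's offset within its first page.
+ */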
+static int vs_block_bio_map_pbuf(struct bio *bio, struct vs_pbuf *pbuf)
+{
+	int offset = offset_in_page((unsigned long)pbuf->data);
+	void *ptr = pbuf->data;
+	int size = pbuf->size;
+
+	while (size > 0) {
+		unsigned bytes = min_t(unsigned, PAGE_SIZE - offset, size);
+
+		if (bio_add_page(bio, virt_to_page(ptr), bytes,
+					offset) < bytes)
+			return -EIO;
+
+		ptr += bytes;
+		size -= bytes;
+		offset = 0;
+	}
+
+	return 0;
+}
+
+/* Read request handling */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)
+static void vs_block_server_read_done(struct bio *bio, int err)
+#else
+static void vs_block_server_read_done(struct bio *bio)
+#endif
+{
+	unsigned long flags;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
+	int err = bio->bi_error;
+#endif
+	struct block_server_request *req = container_of(bio,
+			struct block_server_request, bio);
+	struct block_server *server = req->server;
+	req->op_err = err;
+
+	spin_lock_irqsave(&server->completed_req_lock, flags);
+	if (req->mbuf)
+		list_add(&req->list, &server->completed_req_queue);
+	else
+		list_add_tail(&req->list, &server->completed_req_queue);
+	spin_unlock_irqrestore(&server->completed_req_lock, flags);
+
+	if (req->submitted && atomic_dec_and_test(&server->submitted_req_count))
+		wake_up_all(&server->submitted_req_wq);
+
+	schedule_work(&server->completed_req_work);
+}
+
+/*
+ * TODO: this may need to split and chain the bio if it exceeds the physical
+ * segment limit of the device. It is not clear whose responsibility that is;
+ * the request queue might do it for us (if there is one).
+ */
+#define vs_block_make_request(bio) generic_make_request(bio)
+
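+/*
+ * Submit a read request to the block layer. If the ack-read mbuf is already
+ * available and the driver can access the pbuf in place, the pbuf is mapped
+ * directly into the bio; otherwise bounce pages are allocated and the data
+ * is copied into the ack-read mbuf when the request completes.
+ */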
+static int vs_block_submit_read(struct block_server *server,
+		struct block_server_request *req, gfp_t gfp)
+{
+	struct request_queue *q = bdev_get_queue(server->bdev);
+	struct bio *bio = &req->bio;
+	int size = req->size;
+	int err = 0;
+
+	if (req->mbuf && vs_block_can_map_pbuf(q, &req->pbuf, size)) {
+		/*
+		 * The mbuf is valid and the driver can directly access the
+		 * pbuf, so we don't need a bounce buffer. Map the pbuf
+		 * directly into the bio.
+		 */
+		if (vs_pbuf_resize(&req->pbuf, size) < 0)
+			err = -EIO;
+		if (!err)
+			err = vs_block_bio_map_pbuf(bio, &req->pbuf);
+	} else {
+		/* We need a bounce buffer. First set up the bvecs. */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)
+		bio->bi_iter.bi_size = size;
+#else
+		bio->bi_size = size;
+#endif
+
+		while (size > 0) {
+			struct bio_vec *bvec = &bio->bi_io_vec[bio->bi_vcnt];
+
+			BUG_ON(bio->bi_vcnt >= bio->bi_max_vecs);
+
+			bvec->bv_page = NULL; /* Allocated below */
+			bvec->bv_len = min_t(unsigned, PAGE_SIZE, size);
+			bvec->bv_offset = 0;
+
+			bio->bi_vcnt++;
+			size -= bvec->bv_len;
+		}
+
+		err = bio_alloc_pages(bio, gfp);
+		if (!err) {
+			blk_recount_segments(q, bio);
+			req->bounced = true;
+		}
+	}
+
+	if (err) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
+		bio->bi_error = err;
+		bio_endio(bio);
+#else
+		bio_endio(bio, err);
+#endif
+	} else {
+		dev_vdbg(&server->service->dev,
+				"submit read req sector %#llx count %#x\n",
+				vs_req_sector_index(req),
+				vs_req_num_sectors(server, req));
+		req->submitted = true;
+		atomic_inc(&server->submitted_req_count);
+		vs_block_make_request(bio);
+	}
+
+	return 0;
+}
+
+static int vs_block_server_io_req_read(struct vs_server_block_state *state,
+		u32 tagid, u64 sector_index, u32 num_sects, bool nodelay,
+		bool flush)
+{
+	struct block_server *server = state_to_block_server(state);
+	struct bio *bio;
+	struct block_server_request *req;
+	unsigned size = num_sects * server->sector_size;
+	unsigned op_flags = 0;
+
+	/*
+	 * This nr_pages calculation assumes that the pbuf data is offset from
+	 * the start of the size-aligned message buffer by more than 0 but
+	 * less than one sector, which is always true for the current message
+	 * layout generated by mill when we assume 512-byte sectors.
+	 */
+	unsigned nr_pages = 1 + (size >> PAGE_SHIFT);
+
+	bio = bio_alloc_bioset(GFP_KERNEL, nr_pages, server->bioset);
+	if (!bio)
+		return -ENOMEM;
+	dev_vdbg(&server->service->dev, "alloc r bio %pK\n", bio);
+	req = container_of(bio, struct block_server_request, bio);
+
+	req->server = server;
+	req->tagid = tagid;
+	req->op_err = 0;
+	req->mbuf = NULL;
+	req->size = size;
+	req->bounced = false;
+	req->submitted = false;
+
+	if (flush) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0)
+		op_flags |= REQ_PREFLUSH;
+#else
+		op_flags |= REQ_FLUSH;
+#endif
+	}
+	if (nodelay) {
+		op_flags |= REQ_SYNC;
+	}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)
+	bio->bi_iter.bi_sector = (sector_t)sector_index;
+#else
+	bio->bi_sector = (sector_t)sector_index;
+#endif
+	bio->bi_bdev = server->bdev;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
+	bio_set_op_attrs(bio, REQ_OP_READ, op_flags);
+#else
+	bio->bi_rw = READ | op_flags;
+#endif
+	bio->bi_end_io = vs_block_server_read_done;
+
+	req->mbuf = vs_server_block_io_alloc_ack_read(state, &req->pbuf,
+			GFP_KERNEL);
+	if (IS_ERR(req->mbuf) && (PTR_ERR(req->mbuf) == -ENOBUFS)) {
+		/* Fall back to a bounce buffer */
+		req->mbuf = NULL;
+	} else if (IS_ERR(req->mbuf)) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
+		bio->bi_error = PTR_ERR(req->mbuf);
+		bio_endio(bio);
+#else
+		bio_endio(bio, PTR_ERR(req->mbuf));
+#endif
+		return 0;
+	}
+
+	return vs_block_submit_read(server, req, GFP_KERNEL);
+}
+
+/* Write request handling */
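+/*
+ * Copy a write payload out of the message buffer into freshly allocated
+ * bounce pages, release the message buffer, and submit the bio. Runs from
+ * the bounce work queue so the page allocation and memcpy happen outside
+ * the service state lock.
+ */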
+static int vs_block_submit_bounced_write(struct block_server *server,
+		struct block_server_request *req, gfp_t gfp)
+{
+	struct bio *bio = &req->bio;
+	void *data = req->pbuf.data;
+	struct bio_vec *bv;
+	int i;
+
+	if (bio_alloc_pages(bio, gfp | __GFP_NOWARN) < 0)
+		return -ENOMEM;
+	blk_recount_segments(bdev_get_queue(server->bdev), bio);
+	req->bounced = true;
+
+	/* Copy all the data into the bounce buffer */
+	bio_for_each_segment_all(bv, bio, i) {
+		memcpy(page_address(bv->bv_page) + bv->bv_offset, data,
+				bv->bv_len);
+		data += bv->bv_len;
+	}
+
+	vs_server_block_io_free_req_write(&server->server, &req->pbuf,
+			req->mbuf);
+	req->mbuf = NULL;
+
+	dev_vdbg(&server->service->dev,
+			"submit bounced write req sector %#llx count %#x\n",
+			vs_req_sector_index(req),
+			vs_req_num_sectors(server, req));
+	req->submitted = true;
+	atomic_inc(&server->submitted_req_count);
+	vs_block_make_request(bio);
+
+	return 0;
+}
+
+static void vs_block_server_write_bounce_work(struct work_struct *work)
+{
+	struct block_server *server = container_of(work, struct block_server,
+			bounce_req_work);
+	struct block_server_request *req;
+
+	spin_lock(&server->bounce_req_lock);
+	while (!list_empty(&server->bounce_req_queue)) {
+		req = list_first_entry(&server->bounce_req_queue,
+				struct block_server_request, list);
+		dev_vdbg(&server->service->dev, "write bio %pK\n", &req->bio);
+		list_del(&req->list);
+		spin_unlock(&server->bounce_req_lock);
+
+		if (vs_block_submit_bounced_write(server, req,
+					GFP_KERNEL) == -ENOMEM) {
+			spin_lock(&server->bounce_req_lock);
+			list_add(&req->list, &server->bounce_req_queue);
+			spin_unlock(&server->bounce_req_lock);
+			schedule_work(work);
+			return;
+		}
+
+		spin_lock(&server->bounce_req_lock);
+	}
+	spin_unlock(&server->bounce_req_lock);
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)
+static void vs_block_server_write_done(struct bio *bio, int err)
+#else
+static void vs_block_server_write_done(struct bio *bio)
+#endif
+{
+	unsigned long flags;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
+	int err = bio->bi_error;
+#endif
+	struct block_server_request *req = container_of(bio,
+			struct block_server_request, bio);
+	struct block_server *server = req->server;
+
+	if (req->bounced) {
+		int i;
+		struct bio_vec *bv;
+		bio_for_each_segment_all(bv, bio, i)
+			__free_page(bv->bv_page);
+	} else if (req->mbuf) {
+		vs_server_block_io_free_req_write(&server->server, &req->pbuf,
+				req->mbuf);
+		req->mbuf = NULL;
+	}
+
+	if (req->submitted && atomic_dec_and_test(&server->submitted_req_count))
+		wake_up_all(&server->submitted_req_wq);
+
+	req->op_err = err;
+
+	spin_lock_irqsave(&server->completed_req_lock, flags);
+	list_add_tail(&req->list, &server->completed_req_queue);
+	spin_unlock_irqrestore(&server->completed_req_lock, flags);
+
+	schedule_work(&server->completed_req_work);
+}
+
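+/*
+ * Write request handler. The payload is mapped into the bio directly when
+ * the driver can access the pbuf in place; otherwise the request is queued
+ * for the bounce work queue, which copies the data before submission.
+ */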
+static int vs_block_server_io_req_write(struct vs_server_block_state *state,
+		u32 tagid, u64 sector_index, u32 num_sects, bool nodelay,
+		bool flush, bool commit, struct vs_pbuf pbuf, struct vs_mbuf *mbuf)
+{
+	struct block_server *server = state_to_block_server(state);
+	struct request_queue *q = bdev_get_queue(server->bdev);
+	struct bio *bio;
+	struct block_server_request *req;
+	unsigned long data = (unsigned long)pbuf.data;
+	unsigned long start = data >> PAGE_SHIFT;
+	unsigned long end = (data + pbuf.size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	int err;
+	unsigned op_flags = 0;
+
+	bio = bio_alloc_bioset(GFP_KERNEL, end - start, server->bioset);
+	if (!bio)
+		return -ENOMEM;
+	dev_vdbg(&server->service->dev, "alloc w bio %pK\n", bio);
+	req = container_of(bio, struct block_server_request, bio);
+
+	req->server = server;
+	req->tagid = tagid;
+	req->op_err = 0;
+	req->mbuf = mbuf;
+	req->pbuf = pbuf;
+	req->size = server->sector_size * num_sects;
+	req->bounced = false;
+	req->submitted = false;
+
+	if (flush) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0)
+		op_flags |= REQ_PREFLUSH;
+#else
+		op_flags |= REQ_FLUSH;
+#endif
+	}
+	if (commit) {
+		op_flags |= REQ_FUA;
+	}
+	if (nodelay) {
+		op_flags |= REQ_SYNC;
+	}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)
+	bio->bi_iter.bi_sector = (sector_t)sector_index;
+#else
+	bio->bi_sector = (sector_t)sector_index;
+#endif
+	bio->bi_bdev = server->bdev;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
+	bio_set_op_attrs(bio, REQ_OP_WRITE, op_flags);
+#else
+	bio->bi_rw = WRITE | op_flags;
+#endif
+	bio->bi_end_io = vs_block_server_write_done;
+
+	if (pbuf.size < req->size) {
+		err = -EINVAL;
+		goto fail_bio;
+	}
+	if (WARN_ON(pbuf.size > req->size))
+		pbuf.size = req->size;
+
+	if (state->readonly) {
+		err = -EROFS;
+		goto fail_bio;
+	}
+
+	if (!vs_block_can_map_pbuf(q, &req->pbuf, req->pbuf.size)) {
+		/* We need a bounce buffer. First set up the bvecs. */
+		int size = pbuf.size;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)
+		bio->bi_iter.bi_size = size;
+#else
+		bio->bi_size = size;
+#endif
+
+		while (size > 0) {
+			struct bio_vec *bvec = &bio->bi_io_vec[bio->bi_vcnt];
+
+			BUG_ON(bio->bi_vcnt >= bio->bi_max_vecs);
+
+			bvec->bv_page = NULL; /* Allocated later */
+			bvec->bv_len = min_t(unsigned, PAGE_SIZE, size);
+			bvec->bv_offset = 0;
+
+			bio->bi_vcnt++;
+			size -= bvec->bv_len;
+		}
+
+		/*
+		 * Defer the rest so we don't have to hold the state lock
+		 * during alloc_page & memcpy
+		 */
+		spin_lock(&server->bounce_req_lock);
+		list_add_tail(&req->list, &server->bounce_req_queue);
+		spin_unlock(&server->bounce_req_lock);
+		schedule_work(&server->bounce_req_work);
+
+		return 0;
+	}
+
+	/* No bounce needed; map the pbuf directly. */
+	err = vs_block_bio_map_pbuf(bio, &pbuf);
+	if (err < 0)
+		goto fail_bio;
+
+	dev_vdbg(&server->service->dev,
+			"submit direct write req sector %#llx count %#x\n",
+			vs_req_sector_index(req),
+			vs_req_num_sectors(server, req));
+	req->submitted = true;
+	atomic_inc(&server->submitted_req_count);
+	vs_block_make_request(bio);
+
+	return 0;
+
+fail_bio:
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
+	bio->bi_error = err;
+	bio_endio(bio);
+#else
+	bio_endio(bio, err);
+#endif
+	return 0;
+}
+
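+/*
+ * Fall-back lookup: walk the block class and match each kernel device name
+ * against the service name, then open the matching device by its dev_t.
+ */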
+static struct block_device *
+vs_block_server_find_by_name(struct block_server *server)
+{
+	struct block_device *bdev = NULL;
+	struct class_dev_iter iter;
+	struct device *dev;
+
+	class_dev_iter_init(&iter, &block_class, NULL, NULL);
+	while (1) {
+		dev = class_dev_iter_next(&iter);
+		if (!dev)
+			break;
+
+		if (strcmp(dev_name(dev), server->service->name) == 0) {
+			bdev = blkdev_get_by_dev(dev->devt,
+					VS_BLOCK_BLKDEV_DEFAULT_MODE, NULL);
+			if (!IS_ERR_OR_NULL(bdev))
+				break;
+		}
+	}
+	class_dev_iter_exit(&iter);
+
+	if (!dev || IS_ERR_OR_NULL(bdev))
+		return ERR_PTR(-ENODEV);
+
+	dev_dbg(&server->service->dev, "Attached to block device %s (%d:%d)\n",
+			dev_name(dev), MAJOR(dev->devt), MINOR(dev->devt));
+	return bdev;
+}
+
+static struct block_device *
+vs_block_server_find_by_path(struct block_server *server, const char *base_path)
+{
+	struct block_device *bdev;
+	char *bdev_path;
+
+	bdev_path = kasprintf(GFP_KERNEL, "%s/%s", base_path,
+			server->service->name);
+	if (!bdev_path)
+		return ERR_PTR(-ENOMEM);
+
+	bdev = blkdev_get_by_path(bdev_path, VS_BLOCK_BLKDEV_DEFAULT_MODE,
+			NULL);
+	if (!IS_ERR_OR_NULL(bdev))
+		dev_dbg(&server->service->dev, "Attached to block device %s\n",
+				bdev_path);
+
+	kfree(bdev_path);
+
+	if (IS_ERR_OR_NULL(bdev))
+		return ERR_PTR(-ENODEV);
+	return bdev;
+}
+
+static struct block_device *
+vs_block_server_attach_block_device(struct block_server *server)
+{
+	const char *paths[] = {
+		"/dev",
+		"/dev/block",
+		"/dev/mapper",
+		"/dev/disk/by-partlabel",
+		"/dev/disk/by-label",
+		"/dev/disk/by-partuuid",
+		"/dev/disk/by-uuid"
+	};
+	struct block_device *bdev;
+	int i;
+
+	/*
+	 * First try to look up the block device by path. This is done because
+	 * the name exposed to userspace in /dev is not necessarily the name
+	 * the kernel uses internally for the device.
+	 */
+	for (i = 0; i < ARRAY_SIZE(paths); i++) {
+		bdev = vs_block_server_find_by_path(server, paths[i]);
+		if (!IS_ERR(bdev))
+			break;
+	}
+	if (i == ARRAY_SIZE(paths)) {
+		/*
+		 * Couldn't find the block device in any of the usual places.
+		 * Try to match it against the kernel's device name. If the
+		 * name of the service and the name of a device in the block
+		 * class match then attempt to look the block device up by the
+		 * dev_t (major/minor) value.
+		 */
+		bdev = vs_block_server_find_by_name(server);
+	}
+	if (IS_ERR(bdev))
+		return bdev;
+
+	server->sector_size		= VS_BLOCK_BLK_DEF_SECTOR_SIZE;
+	server->server.segment_size	= round_down(
+		vs_service_max_mbuf_size(server->service) -
+		sizeof(vs_message_id_t), server->sector_size);
+	server->server.sector_size	= server->sector_size;
+	server->server.device_sectors	= bdev->bd_part->nr_sects;
+	if (bdev_read_only(bdev))
+		server->server.readonly = true;
+	server->server.flushable = true;
+	server->server.committable = true;
+
+	return bdev;
+}
+
+static struct vs_server_block_state *
+vs_block_server_alloc(struct vs_service_device *service)
+{
+	struct block_server *server;
+	int err;
+
+	server = kzalloc(sizeof(*server), GFP_KERNEL);
+	if (!server)
+		return NULL;
+
+	server->service = service;
+	server->started = false;
+	INIT_LIST_HEAD(&server->bounce_req_queue);
+	INIT_WORK(&server->bounce_req_work, vs_block_server_write_bounce_work);
+	spin_lock_init(&server->bounce_req_lock);
+	atomic_set(&server->submitted_req_count, 0);
+	init_waitqueue_head(&server->submitted_req_wq);
+	INIT_LIST_HEAD(&server->completed_req_queue);
+	INIT_WORK(&server->completed_req_work,
+			vs_block_server_complete_requests_work);
+	spin_lock_init(&server->completed_req_lock);
+
+	server->bdev = vs_block_server_attach_block_device(server);
+	if (IS_ERR(server->bdev)) {
+		dev_err(&server->service->dev,
+				"No appropriate block device was found to satisfy the service name %s - error %ld\n",
+				server->service->name, PTR_ERR(server->bdev));
+		goto fail_attach_device;
+	}
+
+	dev_set_drvdata(&service->dev, &server->server);
+
+	err = sysfs_create_group(&service->dev.kobj,
+				 &vs_block_server_attr_group);
+	if (err) {
+		dev_err(&service->dev,
+			"Failed to create attribute group for service %s\n",
+			service->name);
+		goto fail_create_group;
+	}
+
+	/*
+	 * We know the upper bound on simultaneously active bios (i.e. the
+	 * smaller of the in-quota and the sum of the read and write command
+	 * tag limits), so we can pre-allocate that many, and hopefully never
+	 * fail to allocate one in a request handler.
+	 *
+	 * However, allocation may fail if the number of pages (and thus
+	 * bvecs) in a request exceeds BIO_INLINE_VECS (which is hard-coded to
+	 * 4 in all mainline kernels). That possibility is the only reason we
+	 * can't enable rx_atomic for this driver.
+	 */
+	server->bioset = bioset_create(min_t(unsigned, service->recv_quota,
+				VSERVICE_BLOCK_IO_READ_MAX_PENDING +
+				VSERVICE_BLOCK_IO_WRITE_MAX_PENDING),
+			offsetof(struct block_server_request, bio));
+	if (!server->bioset) {
+		dev_err(&service->dev,
+			"Failed to allocate bioset for service %s\n",
+			service->name);
+		goto fail_create_bioset;
+	}
+
+	dev_dbg(&service->dev, "New block server %pK\n", server);
+
+	return &server->server;
+
+fail_create_bioset:
+	sysfs_remove_group(&server->service->dev.kobj,
+			   &vs_block_server_attr_group);
+fail_create_group:
+	dev_set_drvdata(&service->dev, NULL);
+	blkdev_put(server->bdev, VS_BLOCK_BLKDEV_DEFAULT_MODE);
+fail_attach_device:
+	kfree(server);
+
+	return NULL;
+}
+
+static void vs_block_server_release(struct vs_server_block_state *state)
+{
+	struct block_server *server = state_to_block_server(state);
+
+	cancel_work_sync(&server->bounce_req_work);
+	cancel_work_sync(&server->completed_req_work);
+
+	blkdev_put(server->bdev, VS_BLOCK_BLKDEV_DEFAULT_MODE);
+
+	sysfs_remove_group(&server->service->dev.kobj,
+			   &vs_block_server_attr_group);
+
+	bioset_free(server->bioset);
+
+	kfree(server);
+}
+
+static struct vs_server_block block_server_driver = {
+	.alloc			= vs_block_server_alloc,
+	.release		= vs_block_server_release,
+	.open			= vs_block_server_open,
+	.closed			= vs_block_server_closed,
+	.tx_ready		= vs_block_server_tx_ready,
+	.io = {
+		.req_read	= vs_block_server_io_req_read,
+		.req_write	= vs_block_server_io_req_write,
+	},
+
+	/* Large default quota for batching read/write commands */
+	.in_quota_best		= 32,
+	.out_quota_best		= 32,
+};
+
+static int __init vs_block_server_init(void)
+{
+	return vservice_block_server_register(&block_server_driver,
+			"block_server_driver");
+}
+
+static void __exit vs_block_server_exit(void)
+{
+	vservice_block_server_unregister(&block_server_driver);
+}
+
+module_init(vs_block_server_init);
+module_exit(vs_block_server_exit);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Block Server Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/bluetooth/bluetooth-power.c b/drivers/bluetooth/bluetooth-power.c
index b54b566..b90bbfe 100644
--- a/drivers/bluetooth/bluetooth-power.c
+++ b/drivers/bluetooth/bluetooth-power.c
@@ -70,6 +70,7 @@
 		rc = PTR_ERR(vreg->reg);
 		pr_err("%s: regulator_get(%s) failed. rc=%d\n",
 			__func__, vreg->name, rc);
+		vreg->reg = NULL;
 		goto out;
 	}
 
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 44bccb1..8dce1a8 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -349,6 +349,7 @@
 	{ USB_DEVICE(0x7392, 0xa611), .driver_info = BTUSB_REALTEK },
 
 	/* Additional Realtek 8723DE Bluetooth devices */
+	{ USB_DEVICE(0x0bda, 0xb009), .driver_info = BTUSB_REALTEK },
 	{ USB_DEVICE(0x2ff8, 0xb011), .driver_info = BTUSB_REALTEK },
 
 	/* Additional Realtek 8821AE Bluetooth devices */
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 0986c32..4c40fa2 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -231,11 +231,11 @@
 
 	BT_DBG("hu %p wq awake device", hu);
 
+	spin_lock(&qca->hci_ibs_lock);
+
 	/* Vote for serial clock */
 	serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON, hu);
 
-	spin_lock(&qca->hci_ibs_lock);
-
 	/* Send wake indication to device */
 	if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0)
 		BT_ERR("Failed to send WAKE to device");
@@ -260,9 +260,10 @@
 
 	BT_DBG("hu %p wq awake rx", hu);
 
+	spin_lock(&qca->hci_ibs_lock);
+
 	serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_ON, hu);
 
-	spin_lock(&qca->hci_ibs_lock);
 	qca->rx_ibs_state = HCI_IBS_RX_AWAKE;
 
 	/* Always acknowledge device wake up,
@@ -287,7 +288,11 @@
 
 	BT_DBG("hu %p rx clock vote off", hu);
 
+	spin_lock(&qca->hci_ibs_lock);
+
 	serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_OFF, hu);
+
+	spin_unlock(&qca->hci_ibs_lock);
 }
 
 static void qca_wq_serial_tx_clock_vote_off(struct work_struct *work)
@@ -298,6 +303,8 @@
 
 	BT_DBG("hu %p tx clock vote off", hu);
 
+	spin_lock(&qca->hci_ibs_lock);
+
 	/* Run HCI tx handling unlocked */
 	hci_uart_tx_wakeup(hu);
 
@@ -305,6 +312,8 @@
 	 * It is up to the tty driver to pend the clocks off until tx done.
 	 */
 	serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_OFF, hu);
+
+	spin_unlock(&qca->hci_ibs_lock);
 }
 
 static void hci_ibs_tx_idle_timeout(unsigned long arg)
@@ -520,8 +529,12 @@
 
 	BT_DBG("hu %p qca close", hu);
 
+	spin_lock(&qca->hci_ibs_lock);
+
 	serial_clock_vote(HCI_IBS_VOTE_STATS_UPDATE, hu);
 
+	spin_unlock(&qca->hci_ibs_lock);
+
 	skb_queue_purge(&qca->tx_wait_q);
 	skb_queue_purge(&qca->txq);
 	del_timer(&qca->tx_idle_timer);
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index b0d0181..87d8c3c 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -623,3 +623,49 @@
 
 endmenu
 
+config OKL4_PIPE
+	bool "OKL4 Pipe Driver"
+	depends on OKL4_GUEST
+	default n
+	help
+	  Virtual pipe driver for the OKL4 Microvisor. This driver allows
+	  OKL4 Microvisor pipes to be exposed directly to user level as
+	  character devices.
+
+config VSERVICES_SERIAL
+	tristate
+
+config VSERVICES_SERIAL_SERVER
+	tristate "Virtual Services serial server"
+	depends on VSERVICES_SUPPORT && VSERVICES_SERVER
+	select VSERVICES_SERIAL
+	select VSERVICES_PROTOCOL_SERIAL_SERVER
+	default y
+	help
+	  Select this option if you want support for server side Virtual
+	  Services serial. A virtual serial service behaves similarly to
+	  a UNIX pseudo terminal (pty), and does not require any physical
+	  serial hardware. Virtual serial devices are typically called
+	  /dev/ttyVS0, /dev/ttyVS1, etc.
+
+config VSERVICES_SERIAL_CLIENT
+	tristate "Virtual Services serial client"
+	depends on VSERVICES_SUPPORT && VSERVICES_CLIENT
+	select VSERVICES_SERIAL
+	select VSERVICES_PROTOCOL_SERIAL_CLIENT
+	default y
+	help
+	  Select this option if you want support for client side Virtual
+	  Services serial. A virtual serial service behaves similarly to
+	  a UNIX pseudo terminal (pty), and does not require any physical
+	  serial hardware. Virtual serial devices are typically called
+	  /dev/ttyVS0, /dev/ttyVS1, etc.
+
+config VSERVICES_VTTY_COUNT
+	int "Maximum number of Virtual Services serial devices"
+	depends on VSERVICES_SERIAL
+	range 0 256
+	default "8"
+	help
+	  The maximum number of Virtual Services serial devices to support.
+	  This limit applies to both the client and server.
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 81283c4..a00142a 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -67,3 +67,11 @@
 obj-$(CONFIG_MSM_ADSPRPC)       += adsprpc_compat.o
 endif
 obj-$(CONFIG_MSM_RDBG)		+= rdbg.o
+obj-$(CONFIG_OKL4_PIPE)		+= okl4_pipe.o
+CFLAGS_okl4_pipe.o			+= -Werror
+obj-$(CONFIG_VSERVICES_SERIAL)		+= vservices_serial.o
+CFLAGS_vservices_serial.o	+= -Werror
+obj-$(CONFIG_VSERVICES_SERIAL_CLIENT)	+= vs_serial_client.o
+CFLAGS_vs_serial_client.o	 += -Werror
+obj-$(CONFIG_VSERVICES_SERIAL_SERVER)	+= vs_serial_server.o
+CFLAGS_vs_serial_server.o	+= -Werror
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 67677ea..b5af2e2 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -645,21 +645,20 @@
 	return -ENOTTY;
 }
 
-static int dma_alloc_memory(dma_addr_t *region_phys, size_t size,
+static int dma_alloc_memory(dma_addr_t *region_phys, void **vaddr, size_t size,
 			unsigned long dma_attrs)
 {
 	struct fastrpc_apps *me = &gfa;
-	void *vaddr = NULL;
 
 	if (me->dev == NULL) {
 		pr_err("device adsprpc-mem is not initialized\n");
 		return -ENODEV;
 	}
-	vaddr = dma_alloc_attrs(me->dev, size, region_phys, GFP_KERNEL,
+	*vaddr = dma_alloc_attrs(me->dev, size, region_phys, GFP_KERNEL,
 								dma_attrs);
-	if (!vaddr) {
-		pr_err("ADSPRPC: Failed to allocate %x remote heap memory\n",
-						(unsigned int)size);
+	if (IS_ERR_OR_NULL(*vaddr)) {
+		pr_err("adsprpc: %s: %s: dma_alloc_attrs failed for size 0x%zx, returned %pK\n",
+				current->comm, __func__, size, (*vaddr));
 		return -ENOMEM;
 	}
 	return 0;
@@ -798,6 +797,7 @@
 	struct fastrpc_mmap *map = NULL;
 	unsigned long attrs;
 	dma_addr_t region_phys = 0;
+	void *region_vaddr = NULL;
 	unsigned long flags;
 	int err = 0, vmid;
 
@@ -820,12 +820,13 @@
 
 		map->apps = me;
 		map->fl = NULL;
-		VERIFY(err, !dma_alloc_memory(&region_phys, len, dma_attrs));
+		VERIFY(err, !dma_alloc_memory(&region_phys, &region_vaddr,
+				 len, dma_attrs));
 		if (err)
 			goto bail;
 		map->phys = (uintptr_t)region_phys;
 		map->size = len;
-		map->va = (uintptr_t)map->phys;
+		map->va = (uintptr_t)region_vaddr;
 	} else if (mflags == FASTRPC_DMAHANDLE_NOMAP) {
 		ion_phys_addr_t iphys;
 
@@ -2707,14 +2708,15 @@
 								1, &rbuf);
 		if (err)
 			goto bail;
-		rbuf->virt = NULL;
-		err = fastrpc_mmap_on_dsp(fl, ud->flags,
-				(uintptr_t)rbuf->virt,
+		err = fastrpc_mmap_on_dsp(fl, ud->flags, 0,
 				rbuf->phys, rbuf->size, &raddr);
 		if (err)
 			goto bail;
 		rbuf->raddr = raddr;
 	} else {
+
+		uintptr_t va_to_dsp;
+
 		mutex_lock(&fl->fl_map_mutex);
 		if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t)ud->vaddrin,
 				 ud->size, ud->flags, 1, &map)) {
@@ -2722,13 +2724,20 @@
 			mutex_unlock(&fl->map_mutex);
 			return 0;
 		}
+
 		VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0,
 				(uintptr_t)ud->vaddrin, ud->size,
 				 ud->flags, &map));
 		mutex_unlock(&fl->fl_map_mutex);
 		if (err)
 			goto bail;
-		VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, map->va,
+
+		if (ud->flags == ADSP_MMAP_HEAP_ADDR ||
+				ud->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
+			va_to_dsp = 0;
+		else
+			va_to_dsp = (uintptr_t)map->va;
+		VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, va_to_dsp,
 				map->phys, map->size, &raddr));
 		if (err)
 			goto bail;
diff --git a/drivers/char/diag/Kconfig b/drivers/char/diag/Kconfig
index a7b6e75..07d675e 100644
--- a/drivers/char/diag/Kconfig
+++ b/drivers/char/diag/Kconfig
@@ -37,7 +37,7 @@
 config DIAG_OVER_PCIE
 	bool "Enable Diag traffic to go over PCIE"
 	depends on DIAG_CHAR
-	depends on MSM_MHI
+	depends on MSM_MHI_DEV
 	help
 	  Diag over PCIE enables sending diag traffic over PCIE endpoint when
 	  pcie is available. Diag PCIE channels should be configured
diff --git a/drivers/char/diag/diag_mux.c b/drivers/char/diag/diag_mux.c
index 52a57bb..ae59175 100644
--- a/drivers/char/diag/diag_mux.c
+++ b/drivers/char/diag/diag_mux.c
@@ -127,7 +127,7 @@
 	pcie_logger.ops[proc] = ops;
 	err = diag_pcie_register(proc, ctx, ops);
 	if (err) {
-		driver->transport_set == DIAG_ROUTE_TO_USB;
+		driver->transport_set = DIAG_ROUTE_TO_USB;
 		diag_mux->logger = &usb_logger;
 		diag_mux->mode = DIAG_USB_MODE;
 		usb_logger.ops[proc] = ops;
diff --git a/drivers/char/diag/diag_pcie.c b/drivers/char/diag/diag_pcie.c
index 8353abc..8f53573 100644
--- a/drivers/char/diag/diag_pcie.c
+++ b/drivers/char/diag/diag_pcie.c
@@ -33,7 +33,7 @@
 	{
 		.id = DIAG_PCIE_LOCAL,
 		.name = DIAG_LEGACY,
-		.enabled = 0,
+		.enabled = {0},
 		.mempool = POOL_TYPE_MUX_APPS,
 		.ops = NULL,
 		.wq = NULL,
@@ -171,6 +171,8 @@
 		diag_ws_on_copy_complete(DIAG_WS_MUX);
 		spin_unlock_irqrestore(&ch->write_lock, flags);
 		diagmem_free(driver, req, ch->mempool);
+		kfree(ctxt);
+		ctxt = NULL;
 		return;
 	}
 	DIAG_LOG(DIAG_DEBUG_MUX, "full write_done, ctxt: %pK\n",
@@ -281,19 +283,19 @@
 		bytes_to_write = mhi_dev_write_channel(req);
 		diag_ws_on_copy(DIAG_WS_MUX);
 		if (bytes_to_write != write_len) {
-			pr_err_ratelimited("diag: In %s, error writing to pcie channel %s, err: %d\n",
+			pr_err_ratelimited("diag: In %s, error writing to pcie channel %s, err: %d, write_len: %d\n",
 					   __func__, pcie_info->name,
-					bytes_to_write);
+					bytes_to_write, write_len);
 			DIAG_LOG(DIAG_DEBUG_MUX,
-				 "ERR! unable to write to pcie, err: %d\n",
-				bytes_to_write);
+				 "ERR! unable to write to pcie, err: %d, write_len: %d\n",
+				bytes_to_write, write_len);
 			diag_ws_on_copy_fail(DIAG_WS_MUX);
 			spin_lock_irqsave(&pcie_info->write_lock, flags);
 			diag_pcie_buf_tbl_remove(pcie_info, buf);
 			kfree(req->context);
 			diagmem_free(driver, req, pcie_info->mempool);
 			spin_unlock_irqrestore(&pcie_info->write_lock, flags);
-			return bytes_to_write;
+			return -EINVAL;
 		}
 		offset += write_len;
 		bytes_remaining -= write_len;
@@ -365,17 +367,18 @@
 	bytes_to_write = mhi_dev_write_channel(req);
 	diag_ws_on_copy(DIAG_WS_MUX);
 	if (bytes_to_write != len) {
-		pr_err_ratelimited("diag: In %s, error writing to pcie channel %s, err: %d\n",
-				   __func__, pcie_info->name, bytes_to_write);
+		pr_err_ratelimited("diag: In %s, error writing to pcie channel %s, err: %d len: %d\n",
+			__func__, pcie_info->name, bytes_to_write, len);
 		diag_ws_on_copy_fail(DIAG_WS_MUX);
 		DIAG_LOG(DIAG_DEBUG_MUX,
-			 "ERR! unable to write to pcie, err: %d\n",
-			bytes_to_write);
+			 "ERR! unable to write to pcie, err: %d len: %d\n",
+			bytes_to_write, len);
 		spin_lock_irqsave(&pcie_info->write_lock, flags);
 		diag_pcie_buf_tbl_remove(pcie_info, buf);
 		spin_unlock_irqrestore(&pcie_info->write_lock, flags);
 		kfree(req->context);
 		diagmem_free(driver, req, pcie_info->mempool);
+		return -EINVAL;
 	}
 	DIAG_LOG(DIAG_DEBUG_MUX, "wrote packet to pcie chan:%d, len:%d",
 		pcie_info->out_chan, len);
@@ -475,10 +478,6 @@
 
 static void diag_pcie_connect(struct diag_pcie_info *ch)
 {
-	int err = 0;
-	int num_write = 0;
-	int num_read = 1; /* Only one read buffer for any pcie channel */
-
 	if (!ch || !atomic_read(&ch->enabled))
 		return;
 
diff --git a/drivers/char/diag/diag_usb.c b/drivers/char/diag/diag_usb.c
index 24a9f8a..f960e8b 100644
--- a/drivers/char/diag/diag_usb.c
+++ b/drivers/char/diag/diag_usb.c
@@ -219,13 +219,6 @@
  */
 static void usb_disconnect(struct diag_usb_info *ch)
 {
-	if (!ch)
-		return;
-
-	if (!atomic_read(&ch->connected) &&
-		driver->usb_connected && diag_mask_param())
-		diag_clear_masks(0);
-
 	if (ch && ch->ops && ch->ops->close)
 		ch->ops->close(ch->ctxt, DIAG_USB_MODE);
 }
@@ -234,6 +227,14 @@
 {
 	struct diag_usb_info *ch = container_of(work, struct diag_usb_info,
 						disconnect_work);
+
+	if (!ch)
+		return;
+
+	if (!atomic_read(&ch->connected) &&
+		driver->usb_connected && diag_mask_param())
+		diag_clear_masks(0);
+
 	usb_disconnect(ch);
 }
 
diff --git a/drivers/char/okl4_pipe.c b/drivers/char/okl4_pipe.c
new file mode 100644
index 0000000..e7a0d8a
--- /dev/null
+++ b/drivers/char/okl4_pipe.c
@@ -0,0 +1,677 @@
+/*
+ * drivers/char/okl4_pipe.c
+ *
+ * Copyright (c) 2015 General Dynamics
+ * Copyright (c) 2015 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * OKL4 Microvisor Pipes driver.
+ *
+ * Clients using this driver must have vclient names of the form
+ * "pipe%d", where %d is the pipe number, which must be
+ * unique and less than MAX_PIPES.
+ */
+
+/* #define DEBUG 1 */
+/* #define VERBOSE_DEBUG 1 */
+
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/of.h>
+#include <asm/uaccess.h>
+#include <asm-generic/okl4_virq.h>
+
+#include <microvisor/microvisor.h>
+#if defined(CONFIG_OKL4_VIRTUALISATION)
+#include <asm/okl4-microvisor/okl4tags.h>
+#include <asm/okl4-microvisor/microvisor_bus.h>
+#include <asm/okl4-microvisor/virq.h>
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)
+#define __devinit
+#define __devexit
+#define __devexit_p(x) x
+#endif
+
+#define DRIVER_NAME "okl4-pipe"
+#define DEVICE_NAME "okl4-pipe"
+
+#ifndef CONFIG_OF
+#error "okl4-pipe driver only supported on device tree kernels"
+#endif
+
+#define MAX_PIPES 8
+
+#ifdef CONFIG_OKL4_INTERLEAVED_PRIORITIES
+extern int vcpu_prio_normal;
+#endif
+
+static int okl4_pipe_major;
+static struct class *okl4_pipe_class;
+
+/* This can be extended if required */
+struct okl4_pipe_mv {
+	int pipe_id;
+};
+
+struct okl4_pipe {
+	struct okl4_pipe_data_buffer *write_buf;
+	okl4_kcap_t pipe_tx_kcap;
+	okl4_kcap_t pipe_rx_kcap;
+	int tx_irq;
+	int rx_irq;
+	size_t max_msg_size;
+	int ref_count;
+	struct mutex pipe_mutex;
+	spinlock_t pipe_lock;
+
+	struct platform_device *pdev;
+	struct cdev cdev;
+
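+	/* Channel status hints, updated from the TX/RX VIRQ payloads */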
+	bool reset;
+	bool tx_maybe_avail;
+	bool rx_maybe_avail;
+
+	wait_queue_head_t rx_wait_q;
+	wait_queue_head_t tx_wait_q;
+	wait_queue_head_t poll_wait_q;
+
+	char *rx_buf;
+	size_t rx_buf_count;
+};
+static struct okl4_pipe pipes[MAX_PIPES];
+
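+/*
+ * Issue a pipe control operation through the _okl4_sys_pipe_control
+ * microvisor call, with the "doop" (do operation) flag set so the
+ * requested operation is actually applied.
+ */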
+static okl4_error_t
+okl4_pipe_control(okl4_kcap_t kcap, uint8_t control)
+{
+	okl4_pipe_control_t x = 0;
+
+	okl4_pipe_control_setdoop(&x, true);
+	okl4_pipe_control_setoperation(&x, control);
+	return _okl4_sys_pipe_control(kcap, x);
+}
+
+static irqreturn_t
+okl4_pipe_tx_irq(int irq, void *dev)
+{
+	struct okl4_pipe *pipe = dev;
+	okl4_pipe_state_t payload = okl4_get_virq_payload(irq);
+
+	spin_lock(&pipe->pipe_lock);
+	if (okl4_pipe_state_gettxavailable(&payload))
+		pipe->tx_maybe_avail = true;
+	if (okl4_pipe_state_getreset(&payload)) {
+		pipe->reset = true;
+		pipe->tx_maybe_avail = true;
+	}
+	spin_unlock(&pipe->pipe_lock);
+
+	wake_up_interruptible(&pipe->tx_wait_q);
+	wake_up_interruptible(&pipe->poll_wait_q);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t
+okl4_pipe_rx_irq(int irq, void *dev)
+{
+	struct okl4_pipe *pipe = dev;
+	okl4_pipe_state_t payload = okl4_get_virq_payload(irq);
+
+	spin_lock(&pipe->pipe_lock);
+	if (okl4_pipe_state_getrxavailable(&payload))
+		pipe->rx_maybe_avail = true;
+	if (okl4_pipe_state_getreset(&payload)) {
+		pipe->reset = true;
+		pipe->rx_maybe_avail = true;
+	}
+	spin_unlock(&pipe->pipe_lock);
+
+	wake_up_interruptible(&pipe->rx_wait_q);
+	wake_up_interruptible(&pipe->poll_wait_q);
+
+	return IRQ_HANDLED;
+}
+
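+/*
+ * Read from the pipe. Each message arrives as a 32-bit length word followed
+ * by payload; any bytes beyond what the caller asked for are kept in rx_buf
+ * and handed back on the next read.
+ */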
+static ssize_t
+okl4_pipe_read(struct file *filp, char __user *buf, size_t count,
+		loff_t *f_pos)
+{
+	struct okl4_pipe_mv *priv = filp->private_data;
+	int pipe_id = priv->pipe_id;
+	struct okl4_pipe *pipe = &pipes[pipe_id];
+	struct _okl4_sys_pipe_recv_return recv_return;
+	uint32_t *buffer = NULL;
+	size_t recv = 0;
+
+	if (!count)
+		return 0;
+
+again:
+	if (pipe->reset)
+		return -EPIPE;
+
+	if (!pipe->rx_maybe_avail && (filp->f_flags & O_NONBLOCK))
+		return -EAGAIN;
+
+	if (wait_event_interruptible(pipe->rx_wait_q, pipe->rx_maybe_avail))
+		return -ERESTARTSYS;
+
+	if (mutex_lock_interruptible(&pipe->pipe_mutex))
+		return -ERESTARTSYS;
+
+	/* Receive buffered data first */
+	if (pipe->rx_buf_count) {
+		recv = min(pipe->rx_buf_count, count);
+
+		if (copy_to_user(buf, pipe->rx_buf, recv)) {
+			mutex_unlock(&pipe->pipe_mutex);
+			return -EFAULT;
+		}
+
+		pipe->rx_buf_count -= recv;
+
+		if (pipe->rx_buf_count) {
+			memmove(pipe->rx_buf, pipe->rx_buf + recv,
+				pipe->max_msg_size - recv);
+		}
+
+		buf += recv;
+		count -= recv;
+		if (!count) {
+			mutex_unlock(&pipe->pipe_mutex);
+			return recv;
+		}
+	}
+
+	buffer = kmalloc(pipe->max_msg_size + sizeof(uint32_t), GFP_KERNEL);
+
+	if (!buffer) {
+		mutex_unlock(&pipe->pipe_mutex);
+		return -ENOMEM;
+	}
+
+	while (count) {
+		okl4_error_t ret;
+		size_t size;
+
+		spin_lock_irq(&pipe->pipe_lock);
+		recv_return = _okl4_sys_pipe_recv(pipe->pipe_rx_kcap,
+				pipe->max_msg_size + sizeof(uint32_t),
+				(void *)buffer);
+		ret = recv_return.error;
+
+		if (ret == OKL4_ERROR_PIPE_NOT_READY ||
+				ret == OKL4_ERROR_PIPE_EMPTY) {
+			pipe->rx_maybe_avail = false;
+			if (!recv) {
+				if (!(filp->f_flags & O_NONBLOCK)) {
+					spin_unlock_irq(&pipe->pipe_lock);
+					mutex_unlock(&pipe->pipe_mutex);
+					kfree(buffer);
+					goto again;
+				}
+				recv = -EAGAIN;
+			}
+			goto error;
+		} else if (ret != OKL4_OK) {
+			dev_err(&pipe->pdev->dev,
+					"pipe recv returned error %d in okl4_pipe driver!\n",
+					(int)ret);
+			if (!recv)
+				recv = -ENXIO;
+			goto error;
+		}
+
+		spin_unlock_irq(&pipe->pipe_lock);
+
+		size = buffer[0];
+		if (size > pipe->max_msg_size) {
+			/* pipe error */
+			if (!recv)
+				recv = -EPROTO;
+			goto out;
+		}
+
+		/* Save extra received data */
+		if (size > count) {
+			pipe->rx_buf_count = size - count;
+			memcpy(pipe->rx_buf, (char*)&buffer[1] + count,
+					size - count);
+			size = count;
+		}
+
+		if (copy_to_user(buf, &buffer[1], size)) {
+			if (!recv)
+				recv = -EFAULT;
+			goto out;
+		}
+
+
+		count -= size;
+		buf += size;
+		recv += size;
+	}
+out:
+	mutex_unlock(&pipe->pipe_mutex);
+
+	kfree(buffer);
+	return recv;
+error:
+	spin_unlock_irq(&pipe->pipe_lock);
+	goto out;
+}
+
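+/*
+ * Write to the pipe. Data is sent in chunks of at most max_msg_size, each
+ * prefixed with a 32-bit length word and padded up to a word boundary.
+ */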
+static ssize_t
+okl4_pipe_write(struct file *filp, const char __user *buf, size_t count,
+		loff_t *f_pos)
+{
+	struct okl4_pipe_mv *priv = filp->private_data;
+	int pipe_id = priv->pipe_id;
+	struct okl4_pipe *pipe = &pipes[pipe_id];
+	uint32_t *buffer = NULL;
+	size_t sent = 0;
+
+	if (!count)
+		return 0;
+
+again:
+	if (pipe->reset)
+		return -EPIPE;
+
+	if (!pipe->tx_maybe_avail && (filp->f_flags & O_NONBLOCK))
+		return -EAGAIN;
+
+	if (wait_event_interruptible(pipe->tx_wait_q, pipe->tx_maybe_avail))
+		return -ERESTARTSYS;
+
+	if (mutex_lock_interruptible(&pipe->pipe_mutex))
+		return -ERESTARTSYS;
+
+	buffer = kmalloc(pipe->max_msg_size + sizeof(uint32_t), GFP_KERNEL);
+
+	if (!buffer) {
+		mutex_unlock(&pipe->pipe_mutex);
+		return -ENOMEM;
+	}
+
+	while (count) {
+		okl4_error_t ret;
+		size_t size = min(count, pipe->max_msg_size);
+		size_t pipe_size = roundup(size + sizeof(uint32_t),
+				sizeof(uint32_t));
+
+		if (copy_from_user(&buffer[1], buf, size)) {
+			if (!sent)
+				sent = -EFAULT;
+			break;
+		}
+
+		buffer[0] = size;
+
+		spin_lock_irq(&pipe->pipe_lock);
+		ret = _okl4_sys_pipe_send(pipe->pipe_tx_kcap, pipe_size,
+				(void *)buffer);
+		if (ret == OKL4_ERROR_PIPE_NOT_READY ||
+				ret == OKL4_ERROR_PIPE_FULL) {
+			pipe->tx_maybe_avail = false;
+			spin_unlock_irq(&pipe->pipe_lock);
+			if (!sent) {
+				if (filp->f_flags & O_NONBLOCK) {
+					sent = -EAGAIN;
+					break;
+				}
+				mutex_unlock(&pipe->pipe_mutex);
+				kfree(buffer);
+				goto again;
+			}
+			break;
+		} else if (ret != OKL4_OK) {
+			dev_err(&pipe->pdev->dev,
+					"pipe send returned error %d in okl4_pipe driver!\n",
+					(int)ret);
+			if (!sent)
+				sent = -ENXIO;
+			spin_unlock_irq(&pipe->pipe_lock);
+			break;
+		}
+		spin_unlock_irq(&pipe->pipe_lock);
+
+		count -= size;
+		buf += size;
+		sent += size;
+	}
+	mutex_unlock(&pipe->pipe_mutex);
+
+	kfree(buffer);
+	return sent;
+}
+
+
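+/*
+ * Poll for readiness using the cached rx/tx availability hints; a pipe
+ * reset is reported as POLLHUP.
+ */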
+static unsigned int
+okl4_pipe_poll(struct file *filp, struct poll_table_struct *poll_table)
+{
+	struct okl4_pipe_mv *priv = filp->private_data;
+	int pipe_id = priv->pipe_id;
+	struct okl4_pipe *pipe = &pipes[pipe_id];
+	unsigned int ret = 0;
+
+	poll_wait(filp, &pipe->poll_wait_q, poll_table);
+
+	spin_lock_irq(&pipe->pipe_lock);
+
+	if (pipe->rx_maybe_avail)
+		ret |= POLLIN | POLLRDNORM;
+	if (pipe->tx_maybe_avail)
+		ret |= POLLOUT | POLLWRNORM;
+	if (pipe->reset)
+		ret = POLLHUP;
+
+	spin_unlock_irq(&pipe->pipe_lock);
+
+	return ret;
+}
+
+static int
+okl4_pipe_open(struct inode *inode, struct file *filp)
+{
+	struct okl4_pipe *pipe = container_of(inode->i_cdev,
+			struct okl4_pipe, cdev);
+	struct okl4_pipe_mv *priv = dev_get_drvdata(&pipe->pdev->dev);
+
+	filp->private_data = priv;
+	if (!pipe->ref_count) {
+		pipe->rx_buf = kmalloc(pipe->max_msg_size, GFP_KERNEL);
+		if (!pipe->rx_buf)
+			return -ENOMEM;
+
+		mutex_init(&pipe->pipe_mutex);
+		spin_lock_init(&pipe->pipe_lock);
+
+		pipe->rx_buf_count = 0;
+		pipe->reset = false;
+		pipe->tx_maybe_avail = true;
+		pipe->rx_maybe_avail = true;
+
+		okl4_pipe_control(pipe->pipe_tx_kcap,
+				OKL4_PIPE_CONTROL_OP_SET_TX_READY);
+		okl4_pipe_control(pipe->pipe_rx_kcap,
+				OKL4_PIPE_CONTROL_OP_SET_RX_READY);
+	}
+	pipe->ref_count++;
+	return 0;
+}
+
+static int
+okl4_pipe_close(struct inode *inode, struct file *filp)
+{
+	struct okl4_pipe *pipe = container_of(inode->i_cdev,
+			struct okl4_pipe, cdev);
+
+	pipe->ref_count--;
+	if (!pipe->ref_count) {
+		okl4_pipe_control(pipe->pipe_rx_kcap,
+				OKL4_PIPE_CONTROL_OP_RESET);
+		okl4_pipe_control(pipe->pipe_tx_kcap,
+				OKL4_PIPE_CONTROL_OP_RESET);
+
+		kfree(pipe->rx_buf);
+		pipe->rx_buf = NULL;
+		pipe->rx_buf_count = 0;
+	}
+
+	return 0;
+}
+
+static const struct file_operations okl4_pipe_fops = {
+	.owner =	THIS_MODULE,
+	.read =		okl4_pipe_read,
+	.write =	okl4_pipe_write,
+	.open =		okl4_pipe_open,
+	.release =	okl4_pipe_close,
+	.poll =		okl4_pipe_poll,
+};
+
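+/*
+ * Probe one pipe device: the "pipe" alias gives the pipe index, the two
+ * "reg" cells carry the TX and RX pipe capabilities, and the two IRQ
+ * resources are the TX and RX state-change interrupts. A character device
+ * node is then created for userspace access.
+ */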
+static int __devinit
+okl4_pipe_probe(struct platform_device *pdev)
+{
+	struct okl4_pipe *pipe;
+	int err, pipe_id;
+	struct okl4_pipe_mv *priv;
+	dev_t dev_num;
+	struct device *device = NULL;
+	u32 reg[2];
+	struct resource *irq;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(struct okl4_pipe_mv),
+			GFP_KERNEL);
+	if (priv == NULL) {
+		err = -ENOMEM;
+		goto fail_alloc_priv;
+	}
+
+	dev_set_drvdata(&pdev->dev, priv);
+
+	pipe_id = of_alias_get_id(pdev->dev.of_node, "pipe");
+	if (pipe_id < 0 || pipe_id >= MAX_PIPES) {
+		err = -ENXIO;
+		goto fail_pipe_id;
+	}
+
+	if (of_property_read_u32_array(pdev->dev.of_node, "reg", reg, 2)) {
+		dev_err(&pdev->dev, "need 2 reg resources\n");
+		err = -ENODEV;
+		goto fail_pipe_id;
+	}
+
+	/* Populate the private structure */
+	priv->pipe_id = pipe_id;
+
+	pipe = &pipes[pipe_id];
+
+	/* Set up and register the pipe device */
+	pipe->pdev = pdev;
+	dev_set_name(&pdev->dev, "%s%d", DEVICE_NAME, (int)pipe_id);
+
+	pipe->ref_count = 0;
+	pipe->pipe_tx_kcap = reg[0];
+	pipe->pipe_rx_kcap = reg[1];
+	pipe->max_msg_size = 64;
+
+	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!irq) {
+		dev_err(&pdev->dev, "no tx irq resource?\n");
+		err = -ENODEV;
+		goto fail_irq_resource;
+	}
+	pipe->tx_irq = irq->start;
+	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+	if (!irq) {
+		dev_err(&pdev->dev, "no rx irq resource?\n");
+		err = -ENODEV;
+		goto fail_irq_resource;
+	}
+	pipe->rx_irq = irq->start;
+
+	pipe->write_buf = kmalloc(sizeof(pipe->write_buf), GFP_KERNEL);
+	if (!pipe->write_buf) {
+		dev_err(&pdev->dev, "cannot allocate write buffer\n");
+		err = -ENOMEM;
+		goto fail_malloc_write;
+	}
+
+	init_waitqueue_head(&pipe->rx_wait_q);
+	init_waitqueue_head(&pipe->tx_wait_q);
+	init_waitqueue_head(&pipe->poll_wait_q);
+
+	err = devm_request_irq(&pdev->dev, pipe->rx_irq,
+			okl4_pipe_rx_irq, 0, dev_name(&pdev->dev),
+			pipe);
+	if (err) {
+		dev_err(&pdev->dev, "cannot register rx irq %d: %d\n",
+				(int)pipe->rx_irq, (int)err);
+		goto fail_request_rx_irq;
+	}
+
+	err = devm_request_irq(&pdev->dev, pipe->tx_irq,
+			okl4_pipe_tx_irq, 0, dev_name(&pdev->dev),
+			pipe);
+	if (err) {
+		dev_err(&pdev->dev, "cannot register tx irq %d: %d\n",
+				(int)pipe->tx_irq, (int)err);
+		goto fail_request_tx_irq;
+	}
+
+	dev_num = MKDEV(okl4_pipe_major, pipe_id);
+
+	cdev_init(&pipe->cdev, &okl4_pipe_fops);
+	pipe->cdev.owner = THIS_MODULE;
+	err = cdev_add(&pipe->cdev, dev_num, 1);
+	if (err) {
+		dev_err(&pdev->dev, "cannot add device: %d\n", (int)err);
+		goto fail_cdev_add;
+	}
+
+	device = device_create(okl4_pipe_class, NULL, dev_num, NULL,
+			DEVICE_NAME "%d", pipe_id);
+	if (IS_ERR(device)) {
+		err = PTR_ERR(device);
+		dev_err(&pdev->dev, "cannot create device: %d\n", (int)err);
+		goto fail_device_create;
+	}
+
+	return 0;
+
+fail_device_create:
+	cdev_del(&pipe->cdev);
+fail_cdev_add:
+	devm_free_irq(&pdev->dev, pipe->tx_irq, pipe);
+fail_request_tx_irq:
+	devm_free_irq(&pdev->dev, pipe->rx_irq, pipe);
+fail_request_rx_irq:
+	kfree(pipe->write_buf);
+fail_malloc_write:
+fail_irq_resource:
+fail_pipe_id:
+	dev_set_drvdata(&pdev->dev, NULL);
+	devm_kfree(&pdev->dev, priv);
+fail_alloc_priv:
+	return err;
+}
+
+static int __devexit
+okl4_pipe_remove(struct platform_device *pdev)
+{
+	struct okl4_pipe *pipe;
+	struct okl4_pipe_mv *priv = dev_get_drvdata(&pdev->dev);
+
+	if (priv->pipe_id < 0 || priv->pipe_id >= MAX_PIPES)
+		return -ENXIO;
+
+	pipe = &pipes[priv->pipe_id];
+
+	cdev_del(&pipe->cdev);
+
+	devm_free_irq(&pdev->dev, pipe->tx_irq, pipe);
+	devm_free_irq(&pdev->dev, pipe->rx_irq, pipe);
+
+	kfree(pipe->write_buf);
+
+	dev_set_drvdata(&pdev->dev, NULL);
+	devm_kfree(&pdev->dev, priv);
+
+	return 0;
+}
+
+static const struct of_device_id okl4_pipe_match[] = {
+	{
+		.compatible = "okl,pipe",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, okl4_pipe_match);
+
+static struct platform_driver okl4_pipe_driver = {
+	.probe		= okl4_pipe_probe,
+	.remove		= __devexit_p(okl4_pipe_remove),
+	.driver = {
+		.name = DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = okl4_pipe_match,
+	},
+};
+
+static int __init
+okl4_pipe_init(void)
+{
+	int err;
+	dev_t dev_num = 0;
+
+	err = alloc_chrdev_region(&dev_num, 0, MAX_PIPES, DEVICE_NAME);
+	if (err < 0) {
+		pr_err("%s: cannot allocate device region\n", __func__);
+		goto fail_alloc_chrdev_region;
+	}
+	okl4_pipe_major = MAJOR(dev_num);
+
+	okl4_pipe_class = class_create(THIS_MODULE, DEVICE_NAME);
+	if (IS_ERR(okl4_pipe_class)) {
+		err = PTR_ERR(okl4_pipe_class);
+		goto fail_class_create;
+	}
+
+	/* Register the driver with the microvisor bus */
+	err = platform_driver_register(&okl4_pipe_driver);
+	if (err)
+		goto fail_driver_register;
+
+	return 0;
+
+fail_driver_register:
+	class_destroy(okl4_pipe_class);
+fail_class_create:
+	unregister_chrdev_region(dev_num, MAX_PIPES);
+fail_alloc_chrdev_region:
+	return err;
+}
+
+static void __exit
+okl4_pipe_exit(void)
+{
+	dev_t dev_num = MKDEV(okl4_pipe_major, 0);
+
+	platform_driver_unregister(&okl4_pipe_driver);
+	class_destroy(okl4_pipe_class);
+	unregister_chrdev_region(dev_num, MAX_PIPES);
+}
+
+module_init(okl4_pipe_init);
+module_exit(okl4_pipe_exit);
+
+MODULE_DESCRIPTION("OKL4 pipe driver");
+MODULE_AUTHOR("John Clarke <johnc@cog.systems>");
diff --git a/drivers/char/vs_serial_client.c b/drivers/char/vs_serial_client.c
new file mode 100644
index 0000000..a0bf1cc
--- /dev/null
+++ b/drivers/char/vs_serial_client.c
@@ -0,0 +1,132 @@
+/*
+ * drivers/char/vs_serial_client.c
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Serial vService client driver
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+
+#include <vservices/transport.h>
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/service.h>
+
+#include <vservices/protocol/serial/common.h>
+#include <vservices/protocol/serial/types.h>
+#include <vservices/protocol/serial/client.h>
+
+#include "vs_serial_common.h"
+
+#define client_state_to_port(state) \
+	container_of(state, struct vtty_port, u.vs_client)
+
+static struct vs_mbuf *vs_serial_client_alloc_msg_buf(struct vtty_port *port,
+		struct vs_pbuf *pbuf, gfp_t gfp_flags)
+{
+	return vs_client_serial_serial_alloc_msg(&port->u.vs_client, pbuf,
+			gfp_flags);
+}
+
+static void vs_serial_client_free_msg_buf(struct vtty_port *port,
+		struct vs_mbuf *mbuf, struct vs_pbuf *pbuf)
+{
+	vs_client_serial_serial_free_msg(&port->u.vs_client, pbuf, mbuf);
+}
+
+static int vs_serial_client_send_msg_buf(struct vtty_port *port,
+		struct vs_mbuf *mbuf, struct vs_pbuf *pbuf)
+{
+	return vs_client_serial_serial_send_msg(&port->u.vs_client, *pbuf,
+			mbuf);
+}
+
+static bool vs_serial_client_is_vservices_running(struct vtty_port *port)
+{
+	return VSERVICE_BASE_STATE_IS_RUNNING(port->u.vs_client.state.base);
+}
+
+static struct vtty_port_ops client_port_ops = {
+	.alloc_msg_buf	= vs_serial_client_alloc_msg_buf,
+	.free_msg_buf	= vs_serial_client_free_msg_buf,
+	.send_msg_buf	= vs_serial_client_send_msg_buf,
+	.is_running	= vs_serial_client_is_vservices_running,
+};
+
+static struct vs_client_serial_state *
+vs_serial_client_alloc(struct vs_service_device *service)
+{
+	struct vtty_port *port;
+
+	port = vs_serial_alloc_port(service, &client_port_ops);
+	if (!port)
+		return NULL;
+
+	dev_set_drvdata(&service->dev, port);
+	return &port->u.vs_client;
+}
+
+static void vs_serial_client_release(struct vs_client_serial_state *_state)
+{
+	vs_serial_release(client_state_to_port(_state));
+}
+
+static void vs_serial_client_closed(struct vs_client_serial_state *_state)
+{
+	vs_serial_reset(client_state_to_port(_state));
+}
+
+static void vs_serial_client_opened(struct vs_client_serial_state *_state)
+{
+	struct vtty_port *port = client_state_to_port(_state);
+
+	dev_dbg(&port->service->dev, "ack_open\n");
+	port->max_transfer_size = _state->packet_size;
+}
+
+static int
+vs_serial_client_handle_message(struct vs_client_serial_state *_state,
+		struct vs_pbuf data, struct vs_mbuf *mbuf)
+{
+	return vs_serial_handle_message(client_state_to_port(_state), mbuf,
+			&data);
+}
+
+static struct vs_client_serial vs_client_serial_driver = {
+	.rx_atomic		= true,
+	.alloc			= vs_serial_client_alloc,
+	.release		= vs_serial_client_release,
+	.closed			= vs_serial_client_closed,
+	.opened			= vs_serial_client_opened,
+	.serial = {
+		.msg_msg	= vs_serial_client_handle_message,
+	},
+};
+
+static int __init vs_serial_client_init(void)
+{
+	return vservice_serial_client_register(&vs_client_serial_driver,
+			"vserial");
+}
+
+static void __exit vs_serial_client_exit(void)
+{
+	vservice_serial_client_unregister(&vs_client_serial_driver);
+}
+
+module_init(vs_serial_client_init);
+module_exit(vs_serial_client_exit);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Serial Client Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/char/vs_serial_common.h b/drivers/char/vs_serial_common.h
new file mode 100644
index 0000000..2fe7d28
--- /dev/null
+++ b/drivers/char/vs_serial_common.h
@@ -0,0 +1,91 @@
+/*
+ * drivers/char/vs_serial_common.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef _VS_SERIAL_COMMON_H
+#define _VS_SERIAL_COMMON_H
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
+#include <linux/console.h>
+
+#include <vservices/protocol/serial/common.h>
+#include <vservices/protocol/serial/types.h>
+#include <vservices/protocol/serial/server.h>
+#include <vservices/protocol/serial/client.h>
+
+#define OUTBUFFER_SIZE 1024
+#define vtty_list_last_entry(ptr, type, member) \
+	list_entry((ptr)->prev, type, member)
+
+struct vtty_port;
+struct vs_service_device;
+
+struct vtty_port_ops {
+	struct vs_mbuf	*(*alloc_msg_buf)(struct vtty_port *port,
+			struct vs_pbuf *pbuf, gfp_t gfp_flags);
+	void		(*free_msg_buf)(struct vtty_port *port,
+			struct vs_mbuf *mbuf, struct vs_pbuf *pbuf);
+	int		(*send_msg_buf)(struct vtty_port *port,
+			struct vs_mbuf *mbuf, struct vs_pbuf *pbuf);
+	bool		(*is_running)(struct vtty_port *port);
+};
+
+struct vtty_port {
+	union {
+		struct vs_client_serial_state vs_client;
+		struct vs_server_serial_state vs_server;
+	} u;
+
+	struct vs_service_device	*service;
+	int				port_num;
+
+	struct tty_driver		*vtty_driver;
+
+	struct vtty_port_ops		ops;
+
+	/* output data */
+	bool				doing_release;
+
+	int				max_transfer_size;
+
+	/* Tracks if tty layer can receive data from driver */
+	bool				tty_canrecv;
+
+	/*
+	 * List of pending incoming buffers from the vServices stack. If we
+	 * receive a buffer, but cannot write it to the tty layer then we
+	 * queue it on this list to handle later. in_lock protects access to
+	 * the pending_in_packets list and the tty_canrecv field.
+	 */
+	struct list_head		pending_in_packets;
+	spinlock_t			in_lock;
+
+#ifdef CONFIG_OKL4_VTTY_CONSOLE
+	struct console			console;
+#endif
+
+	struct tty_port			port;
+};
+
+extern struct vtty_port *
+vs_serial_alloc_port(struct vs_service_device *service,
+	struct vtty_port_ops *port_ops);
+extern void vs_serial_release(struct vtty_port *port);
+extern void vs_serial_reset(struct vtty_port *port);
+extern int vs_serial_handle_message(struct vtty_port *port,
+		struct vs_mbuf *mbuf, struct vs_pbuf *pbuf);
+
+#endif /* _VS_SERIAL_COMMON_H */
diff --git a/drivers/char/vs_serial_server.c b/drivers/char/vs_serial_server.c
new file mode 100644
index 0000000..d4a169e
--- /dev/null
+++ b/drivers/char/vs_serial_server.c
@@ -0,0 +1,152 @@
+/*
+ * drivers/char/vs_serial_server.c
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Serial vService server driver
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+
+#include <vservices/transport.h>
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/service.h>
+
+#include <vservices/protocol/serial/common.h>
+#include <vservices/protocol/serial/types.h>
+#include <vservices/protocol/serial/server.h>
+
+#include "vs_serial_common.h"
+
+#define server_state_to_port(state) \
+	container_of(state, struct vtty_port, u.vs_server)
+
+static struct vs_mbuf *vs_serial_server_alloc_msg_buf(struct vtty_port *port,
+		struct vs_pbuf *pbuf, gfp_t gfp_flags)
+{
+	return vs_server_serial_serial_alloc_msg(&port->u.vs_server, pbuf,
+			gfp_flags);
+}
+
+static void vs_serial_server_free_msg_buf(struct vtty_port *port,
+		struct vs_mbuf *mbuf, struct vs_pbuf *pbuf)
+{
+	vs_server_serial_serial_free_msg(&port->u.vs_server, pbuf, mbuf);
+}
+
+static int vs_serial_server_send_msg_buf(struct vtty_port *port,
+		struct vs_mbuf *mbuf, struct vs_pbuf *pbuf)
+{
+	return vs_server_serial_serial_send_msg(&port->u.vs_server, *pbuf, mbuf);
+}
+
+static bool vs_serial_server_is_vservices_running(struct vtty_port *port)
+{
+	return VSERVICE_BASE_STATE_IS_RUNNING(port->u.vs_server.state.base);
+}
+
+static struct vtty_port_ops server_port_ops = {
+	.alloc_msg_buf	= vs_serial_server_alloc_msg_buf,
+	.free_msg_buf	= vs_serial_server_free_msg_buf,
+	.send_msg_buf	= vs_serial_server_send_msg_buf,
+	.is_running	= vs_serial_server_is_vservices_running,
+};
+
+static struct vs_server_serial_state *
+vs_serial_server_alloc(struct vs_service_device *service)
+{
+	struct vtty_port *port;
+
+	port = vs_serial_alloc_port(service, &server_port_ops);
+	if (!port)
+		return NULL;
+
+	dev_set_drvdata(&service->dev, port);
+	return &port->u.vs_server;
+}
+
+static void vs_serial_server_release(struct vs_server_serial_state *_state)
+{
+	vs_serial_release(server_state_to_port(_state));
+}
+
+static void vs_serial_server_closed(struct vs_server_serial_state *_state)
+{
+	vs_serial_reset(server_state_to_port(_state));
+}
+
+static int
+vs_serial_server_handle_message(struct vs_server_serial_state *_state,
+		struct vs_pbuf data, struct vs_mbuf *mbuf)
+{
+	return vs_serial_handle_message(server_state_to_port(_state), mbuf,
+			&data);
+}
+
+static vs_server_response_type_t
+vs_serial_server_req_open(struct vs_server_serial_state *_state)
+{
+	struct vtty_port *port = server_state_to_port(_state);
+
+	dev_dbg(&port->service->dev, "req_open\n");
+
+	/* FIXME: Jira ticket SDK-3521 - ryanm. */
+	port->max_transfer_size = vs_service_max_mbuf_size(port->service) - 8;
+	_state->packet_size = port->max_transfer_size;
+
+	return VS_SERVER_RESP_SUCCESS;
+}
+
+static vs_server_response_type_t
+vs_serial_server_req_close(struct vs_server_serial_state *_state)
+{
+	struct vtty_port *port = server_state_to_port(_state);
+
+	dev_dbg(&port->service->dev, "req_close\n");
+
+	return VS_SERVER_RESP_SUCCESS;
+}
+
+static struct vs_server_serial vs_server_serial_driver = {
+	.rx_atomic		= true,
+	.alloc			= vs_serial_server_alloc,
+	.release		= vs_serial_server_release,
+	.closed			= vs_serial_server_closed,
+	.open			= vs_serial_server_req_open,
+	.close			= vs_serial_server_req_close,
+	.serial = {
+		.msg_msg	= vs_serial_server_handle_message,
+	},
+
+	/* Large default quota for batching data messages */
+	.in_quota_best		= 16,
+	.out_quota_best		= 16,
+};
+
+static int __init vs_serial_server_init(void)
+{
+	return vservice_serial_server_register(&vs_server_serial_driver,
+			"vserial");
+}
+
+static void __exit vs_serial_server_exit(void)
+{
+	vservice_serial_server_unregister(&vs_server_serial_driver);
+}
+
+module_init(vs_serial_server_init);
+module_exit(vs_serial_server_exit);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Serial Server Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/char/vservices_serial.c b/drivers/char/vservices_serial.c
new file mode 100644
index 0000000..0194eac
--- /dev/null
+++ b/drivers/char/vservices_serial.c
@@ -0,0 +1,634 @@
+/*
+ * drivers/char/vservice_serial.c
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * serial vservice client driver
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/bitmap.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/console.h>
+#include <linux/version.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/mutex.h>
+
+#include <vservices/transport.h>
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/protocol/serial/common.h>
+#include <vservices/protocol/serial/types.h>
+#include <vservices/protocol/serial/server.h>
+#include <vservices/service.h>
+#include <vservices/wait.h>
+
+#include "vs_serial_common.h"
+
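+/*
+ * Bookkeeping for a received message: pbuf holds the payload and offset
+ * tracks how much of it has already been pushed to the tty layer.
+ */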
+struct vtty_in_packet {
+	struct vs_pbuf	pbuf;
+	size_t		offset;
+};
+
+static int max_ttys = CONFIG_VSERVICES_VTTY_COUNT;
+static unsigned long *alloced_ttys;
+module_param(max_ttys, int, S_IRUGO);
+
+static struct tty_driver *vtty_driver;
+
+static DEFINE_MUTEX(tty_bitmap_lock);
+
+static struct vtty_port *dev_to_port(struct device *dev)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+#if defined(CONFIG_VSERVICES_SERIAL_SERVER) || \
+    defined(CONFIG_VSERVICES_SERIAL_SERVER_MODULE)
+	if (service->is_server) {
+		struct vs_server_serial_state *server = dev_get_drvdata(dev);
+		return container_of(server, struct vtty_port, u.vs_server);
+	}
+#endif
+#if defined(CONFIG_VSERVICES_SERIAL_CLIENT) || \
+    defined(CONFIG_VSERVICES_SERIAL_CLIENT_MODULE)
+	if (!service->is_server) {
+		struct vs_client_serial_state *client = dev_get_drvdata(dev);
+		return container_of(client, struct vtty_port, u.vs_client);
+	}
+#endif
+	/* should never get here */
+	WARN_ON(1);
+	return NULL;
+}
+
+static struct vtty_port *port_from_tty(struct tty_struct *tty)
+{
+	return dev_to_port(tty->dev->parent);
+}
+
+static int vtty_install(struct tty_driver *driver, struct tty_struct *tty)
+{
+	struct vtty_port *port;
+
+	if (tty->index < 0 || !test_bit(tty->index, alloced_ttys))
+		return -ENXIO;
+
+	port = port_from_tty(tty);
+
+	if (!port)
+		return -ENXIO;
+
+	tty->driver_data = port;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)
+	if (tty->port)
+		tty->port->low_latency = 0;
+#else
+	tty->low_latency = 0;
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
+	tty_port_install(&port->port, driver, tty);
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)
+	tty->port = &port->port;
+	tty_standard_install(driver, tty);
+#else
+	tty->port = &port->port;
+	if (tty_init_termios(tty) != 0)
+		return -ENOMEM;
+
+	tty_driver_kref_get(driver);
+	tty->count++;
+	driver->ttys[tty->index] = tty;
+#endif
+
+	return 0;
+}
+
+static int vtty_open(struct tty_struct *tty, struct file *file)
+{
+	struct vtty_port *port = tty->driver_data;
+	return tty_port_open(&port->port, tty, file);
+}
+
+static void vtty_close(struct tty_struct *tty, struct file *file)
+{
+	struct vtty_port *port = tty->driver_data;
+	if (port)
+		tty_port_close(&port->port, tty, file);
+}
+
+static void vtty_shutdown(struct tty_port *port)
+{
+	struct vtty_port *vtty_port =
+			container_of(port, struct vtty_port, port);
+
+	if (vtty_port->doing_release)
+		kfree(vtty_port);
+}
+
+static int vtty_write_room(struct tty_struct *tty)
+{
+	struct vtty_port *port = tty->driver_data;
+
+	return vs_service_send_mbufs_available(port->service) *
+			port->max_transfer_size;
+}
+
+static struct vs_mbuf *vserial_alloc_send_buffer(struct vtty_port *port,
+		const unsigned char *buf, size_t size, struct vs_pbuf *pbuf,
+		gfp_t gfp_flags)
+{
+	struct vs_mbuf *mbuf;
+	ssize_t ret;
+
+	mbuf = port->ops.alloc_msg_buf(port, pbuf, gfp_flags);
+	if (IS_ERR(mbuf)) {
+		ret = PTR_ERR(mbuf);
+		goto fail;
+	}
+
+	ret = vs_pbuf_resize(pbuf, size);
+	if (ret < (ssize_t)size)
+		goto fail_free_buf;
+
+	ret = vs_pbuf_copyin(pbuf, 0, buf, size);
+	if (ret < (ssize_t)size)
+		goto fail_free_buf;
+
+	return mbuf;
+
+fail_free_buf:
+	port->ops.free_msg_buf(port, mbuf, pbuf);
+fail:
+	return ERR_PTR(ret);
+}
+
+static int vtty_write(struct tty_struct *tty, const unsigned char *buf,
+		int count)
+{
+	struct vtty_port *port;
+	size_t sent_bytes = 0, size;
+	struct vs_mbuf *mbuf;
+	struct vs_pbuf pbuf;
+	int err;
+
+	if (WARN_ON(!tty || !buf))
+		return -EINVAL;
+
+	port = tty->driver_data;
+	if (!port->ops.is_running(port)) {
+		dev_dbg(&port->service->dev, "tty is not running!");
+		dev_dbg(&port->service->dev, "tty is not running!\n");
+	}
+
+	/*
+	 * We need to break our message up into chunks of
+	 * port->max_transfer_size.
+	 */
+	dev_dbg(&port->service->dev, "Writing %d bytes\n", count);
+	while (sent_bytes < count) {
+		size = min_t(size_t, count - sent_bytes,
+				port->max_transfer_size);
+
+		/*
+		 * Passing &port->u.vs_client here works for both the client
+		 * and the server since vs_client and vs_server are in the
+		 * same union, and therefore have the same address.
+		 */
+		mbuf = vs_service_waiting_alloc(&port->u.vs_client,
+				vserial_alloc_send_buffer(port,
+				buf + sent_bytes, size, &pbuf, GFP_KERNEL));
+		if (IS_ERR(mbuf)) {
+			dev_err(&port->service->dev,
+					"Failed to alloc mbuf of %zu bytes: %ld - resetting service\n",
+					size, PTR_ERR(mbuf));
+			vs_service_reset(port->service, port->service);
+			return -EIO;
+		}
+
+		vs_service_state_lock(port->service);
+		err = port->ops.send_msg_buf(port, mbuf, &pbuf);
+		vs_service_state_unlock(port->service);
+		if (err) {
+			port->ops.free_msg_buf(port, mbuf, &pbuf);
+			dev_err(&port->service->dev,
+					"send failed: %d - resetting service\n",
+					err);
+			vs_service_reset(port->service, port->service);
+			return -EIO;
+		}
+
+		dev_dbg(&port->service->dev, "Sent %zu bytes (%zu/%d)\n",
+				size, sent_bytes + size, count);
+		sent_bytes += size;
+	}
+
+	dev_dbg(&port->service->dev, "Write complete - sent %zu/%d bytes\n",
+			sent_bytes, count);
+	return sent_bytes;
+}
+
+static int vtty_put_char(struct tty_struct *tty, unsigned char ch)
+{
+	return vtty_write(tty, &ch, 1);
+}
+
+static size_t vs_serial_send_pbuf_to_tty(struct vtty_port *port,
+		struct vs_pbuf *pbuf, size_t offset)
+{
+	struct tty_struct *tty = tty_port_tty_get(&port->port);
+	size_t space, size;
+
+	lockdep_assert_held(&port->in_lock);
+
+	size = vs_pbuf_size(pbuf) - offset;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)
+	space = tty_buffer_request_room(tty->port, size);
+#else
+	space = tty_buffer_request_room(tty, size);
+#endif
+	if (space) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)
+		tty_insert_flip_string(tty->port, pbuf->data + offset, space);
+		tty_flip_buffer_push(tty->port);
+#else
+		tty_insert_flip_string(tty, pbuf->data + offset, space);
+		tty_flip_buffer_push(tty);
+#endif
+	}
+
+	tty_kref_put(tty);
+
+	/* Return the number of bytes written */
+	return space;
+}
+
+static void vtty_throttle(struct tty_struct *tty)
+{
+	struct vtty_port *port = tty->driver_data;
+
+	dev_dbg(&port->service->dev, "throttle\n");
+
+	spin_lock_bh(&port->in_lock);
+	port->tty_canrecv = false;
+	spin_unlock_bh(&port->in_lock);
+}
+
+static void vtty_unthrottle(struct tty_struct *tty)
+{
+	struct vtty_port *port = tty->driver_data;
+	struct vtty_in_packet *packet;
+	struct vs_mbuf *mbuf;
+	size_t sent;
+
+	dev_dbg(&port->service->dev, "unthrottle\n");
+
+	spin_lock_bh(&port->in_lock);
+
+	while (!list_empty(&port->pending_in_packets)) {
+		mbuf = list_first_entry(&port->pending_in_packets,
+				struct vs_mbuf, queue);
+		packet = mbuf->priv;
+
+		sent = vs_serial_send_pbuf_to_tty(port, &packet->pbuf,
+				packet->offset);
+		packet->offset += sent;
+		if (packet->offset < vs_pbuf_size(&packet->pbuf)) {
+			/*
+			 * Only wrote part of the buffer. This means that we
+			 * still have pending data that cannot be written to
+			 * the tty at this time. The tty layer will rethrottle
+			 * and this function will be called again when the tty
+			 * layer is next able to handle data and we can write
+			 * the remainder of the buffer.
+			 */
+			dev_dbg(&port->service->dev,
+					"unthrottle: Only wrote %zu (%zu/%zu) bytes\n",
+					sent, packet->offset,
+					vs_pbuf_size(&packet->pbuf));
+			break;
+		}
+
+		dev_dbg(&port->service->dev,
+				"unthrottle: wrote %zu (%zu/%zu) bytes\n",
+				sent, packet->offset,
+				vs_pbuf_size(&packet->pbuf));
+
+		/* Wrote the whole buffer - free it */
+		list_del(&mbuf->queue);
+		port->ops.free_msg_buf(port, mbuf, &packet->pbuf);
+		kfree(packet);
+	}
+
+	port->tty_canrecv = true;
+	spin_unlock_bh(&port->in_lock);
+}
+
+static struct tty_port_operations vtty_port_ops = {
+	.shutdown	= vtty_shutdown,
+};
+
+static struct tty_operations vtty_ops = {
+	.install	= vtty_install,
+	.open		= vtty_open,
+	.close		= vtty_close,
+	.write		= vtty_write,
+	.write_room	= vtty_write_room,
+	.put_char	= vtty_put_char,
+	.throttle	= vtty_throttle,
+	.unthrottle	= vtty_unthrottle
+};
+
+static int vs_serial_queue_incoming_packet(struct vtty_port *port,
+		struct vs_mbuf *mbuf, struct vs_pbuf *pbuf, size_t offset)
+{
+	struct vtty_in_packet *packet;
+
+	lockdep_assert_held(&port->in_lock);
+
+	packet = kzalloc(sizeof(*packet), GFP_ATOMIC);
+	if (!packet) {
+		/*
+		 * Uh oh, we are seriously out of memory. The incoming data
+		 * will be lost.
+		 */
+		return -ENOMEM;
+	}
+
+	dev_dbg(&port->service->dev, "Queuing packet %zu bytes, offset %zu\n",
+			vs_pbuf_size(pbuf), offset);
+	mbuf->priv = packet;
+	memcpy(&packet->pbuf, pbuf, sizeof(*pbuf));
+	packet->offset = offset;
+
+	list_add_tail(&mbuf->queue, &port->pending_in_packets);
+	return 0;
+}
+
+int vs_serial_handle_message(struct vtty_port *port, struct vs_mbuf *mbuf,
+		struct vs_pbuf *pbuf)
+{
+	struct tty_struct *tty = tty_port_tty_get(&port->port);
+	bool queue_packet = false;
+	size_t sent = 0;
+	int err;
+
+	if (!tty) {
+		dev_dbg(&port->service->dev,
+				"tty not open. Dropping %zu chars\n",
+				pbuf->size);
+		port->ops.free_msg_buf(port, mbuf, pbuf);
+		return 0;
+	}
+
+	dev_dbg(&port->service->dev, "Incoming message - len = %zu\n",
+			pbuf->size);
+
+	spin_lock(&port->in_lock);
+	if (!port->tty_canrecv || !list_empty(&port->pending_in_packets)) {
+		/*
+		 * We cannot send to the tty right now, either because we are
+		 * being throttled or because we still have pending data
+		 * to write out to the tty. Queue the buffer up so we can
+		 * write it later.
+		 */
+		dev_dbg(&port->service->dev,
+				"Cannot send (canrecv = %d, queued = %d) - queuing message\n",
+				port->tty_canrecv,
+				!list_empty(&port->pending_in_packets));
+		queue_packet = true;
+
+	} else {
+		sent = vs_serial_send_pbuf_to_tty(port, pbuf, 0);
+		if (sent < vs_pbuf_size(pbuf)) {
+			/*
+			 * Only wrote part of the buffer to the tty. Queue
+			 * the buffer to write the rest.
+			 */
+			dev_dbg(&port->service->dev,
+					"Sent %zu/%zu bytes to tty - queueing rest\n",
+					sent, vs_pbuf_size(pbuf));
+			queue_packet = true;
+		}
+	}
+
+	if (queue_packet) {
+		/*
+		 * Queue the incoming data up. If we are not already throttled,
+		 * the tty layer will do so now since it has no room in its
+		 * buffers.
+		 */
+		err = vs_serial_queue_incoming_packet(port, mbuf, pbuf, sent);
+		if (err) {
+			dev_err(&port->service->dev,
+					"Failed to queue packet - dropping chars\n");
+			port->ops.free_msg_buf(port, mbuf, pbuf);
+		}
+
+	} else {
+		port->ops.free_msg_buf(port, mbuf, pbuf);
+	}
+
+	spin_unlock(&port->in_lock);
+	tty_kref_put(tty);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vs_serial_handle_message);
+
+#ifdef CONFIG_OKL4_VTTY_CONSOLE
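+/*
+ * Minimal console hooks: writes are discarded and the console entry only
+ * maps a console index onto the corresponding vtty device.
+ */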
+static int vconsole_setup(struct console *co, char *options)
+{
+	if (co->index < 0 || co->index >= max_ttys)
+		co->index = 0;
+
+	pr_info("OKL4 virtual console init\n");
+
+	return 0;
+}
+
+static void vconsole_write(struct console *co, const char *p, unsigned count)
+{
+}
+
+static struct tty_driver *vconsole_device(struct console *co, int *index)
+{
+	*index = co->index;
+
+	return vtty_driver;
+}
+#endif /* CONFIG_OKL4_VTTY_CONSOLE */
+
+static void vs_serial_free_buffers(struct vtty_port *port)
+{
+	struct vtty_in_packet *packet;
+	struct vs_mbuf *mbuf;
+
+	/* Free the list of incoming buffers */
+	spin_lock_bh(&port->in_lock);
+	while (!list_empty(&port->pending_in_packets)) {
+		mbuf = list_first_entry(&port->pending_in_packets,
+				struct vs_mbuf, queue);
+		packet = mbuf->priv;
+
+		list_del(&mbuf->queue);
+		port->ops.free_msg_buf(port, mbuf, &packet->pbuf);
+		kfree(packet);
+	}
+	spin_unlock_bh(&port->in_lock);
+}
+
+/** vservices callbacks **/
+struct vtty_port *vs_serial_alloc_port(struct vs_service_device *service,
+		struct vtty_port_ops *port_ops)
+{
+	struct vtty_port *port;
+	int port_num;
+
+	mutex_lock(&tty_bitmap_lock);
+	port_num = find_first_zero_bit(alloced_ttys, max_ttys);
+
+	if (port_num >= max_ttys) {
+		mutex_unlock(&tty_bitmap_lock);
+		return NULL;
+	}
+
+	port = kzalloc(sizeof(struct vtty_port), GFP_KERNEL);
+	if (!port) {
+		mutex_unlock(&tty_bitmap_lock);
+		return NULL;
+	}
+
+	port->service = service;
+	port->ops = *port_ops;
+	port->tty_canrecv = true;
+	port->port_num = port_num;
+	INIT_LIST_HEAD(&port->pending_in_packets);
+	spin_lock_init(&port->in_lock);
+#ifdef CONFIG_OKL4_VTTY_CONSOLE
+	/* Set up and register the port's console device */
+	strlcpy(port->console.name, "vconvs", sizeof(port->console.name));
+	port->console.write = vconsole_write;
+	port->console.flags = CON_PRINTBUFFER;
+	port->console.device = vconsole_device;
+	port->console.setup = vconsole_setup;
+	port->console.index = port_num;
+
+	register_console(&port->console);
+#endif
+	port->vtty_driver = vtty_driver;
+
+	tty_port_init(&port->port);
+	port->port.ops = &vtty_port_ops;
+
+	tty_register_device(vtty_driver, port_num, &service->dev);
+	bitmap_set(alloced_ttys, port_num, 1);
+	mutex_unlock(&tty_bitmap_lock);
+
+	return port;
+}
+EXPORT_SYMBOL(vs_serial_alloc_port);
+
+void vs_serial_release(struct vtty_port *port)
+{
+	dev_dbg(&port->service->dev, "Release\n");
+
+#ifdef CONFIG_OKL4_VTTY_CONSOLE
+	unregister_console(&port->console);
+#endif
+
+	mutex_lock(&tty_bitmap_lock);
+	bitmap_clear(alloced_ttys, port->port_num, 1);
+	mutex_unlock(&tty_bitmap_lock);
+
+	if (port->port.tty) {
+		tty_vhangup(port->port.tty);
+		tty_kref_put(port->port.tty);
+	}
+
+	vs_serial_free_buffers(port);
+	port->doing_release = true;
+	tty_unregister_device(vtty_driver, port->port_num);
+}
+EXPORT_SYMBOL_GPL(vs_serial_release);
+
+void vs_serial_reset(struct vtty_port *port)
+{
+	/* Free list of in and out mbufs. */
+	vs_serial_free_buffers(port);
+}
+EXPORT_SYMBOL_GPL(vs_serial_reset);
+
+static int __init vs_serial_init(void)
+{
+	int err;
+
+	if (max_ttys == 0)
+		return -EINVAL;
+
+	alloced_ttys = kzalloc(sizeof(unsigned long) * BITS_TO_LONGS(max_ttys),
+			GFP_KERNEL);
+	if (!alloced_ttys) {
+		err = -ENOMEM;
+		goto fail_alloc_ttys;
+	}
+
+	/* Set up the tty driver. */
+	vtty_driver = alloc_tty_driver(max_ttys);
+	if (!vtty_driver) {
+		err = -ENOMEM;
+		goto fail_alloc_tty_driver;
+	}
+
+	vtty_driver->owner = THIS_MODULE;
+	vtty_driver->driver_name = "okl4-vservices-serial";
+	vtty_driver->name = "ttyVS";
+	vtty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+	vtty_driver->subtype = SERIAL_TYPE_NORMAL;
+	vtty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+	vtty_driver->init_termios = tty_std_termios;
+	vtty_driver->num = max_ttys;
+
+	/* These flags don't really matter; just use sensible defaults. */
+	vtty_driver->init_termios.c_cflag =
+			B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+	vtty_driver->init_termios.c_ispeed = 9600;
+	vtty_driver->init_termios.c_ospeed = 9600;
+
+	tty_set_operations(vtty_driver, &vtty_ops);
+
+	err = tty_register_driver(vtty_driver);
+	if (err)
+		goto fail_tty_driver_register;
+
+	return 0;
+
+fail_tty_driver_register:
+	put_tty_driver(vtty_driver);
+fail_alloc_tty_driver:
+	kfree(alloced_ttys);
+fail_alloc_ttys:
+	return err;
+}
+
+static void __exit vs_serial_exit(void)
+{
+	tty_unregister_driver(vtty_driver);
+	put_tty_driver(vtty_driver);
+}
+
+module_init(vs_serial_init);
+module_exit(vs_serial_exit);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Serial Core Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
index 7b23db4..aa8dccc 100644
--- a/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
+++ b/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
@@ -95,6 +95,9 @@
 #define PLL_PLL_INT_GAIN_IFILT_BAND_1		0x15c
 #define PLL_PLL_FL_INT_GAIN_PFILT_BAND_1	0x164
 #define PLL_FASTLOCK_EN_BAND			0x16c
+#define PLL_FREQ_TUNE_ACCUM_INIT_LOW		0x170
+#define PLL_FREQ_TUNE_ACCUM_INIT_MID		0x174
+#define PLL_FREQ_TUNE_ACCUM_INIT_HIGH		0x178
 #define PLL_FREQ_TUNE_ACCUM_INIT_MUX		0x17c
 #define PLL_PLL_LOCK_OVERRIDE			0x180
 #define PLL_PLL_LOCK_DELAY			0x184
@@ -112,6 +115,7 @@
 #define PHY_CMN_RBUF_CTRL	0x01c
 #define PHY_CMN_PLL_CNTRL	0x038
 #define PHY_CMN_CTRL_0		0x024
+#define PHY_CMN_CTRL_2		0x02c
 
 /* Bit definition of SSC control registers */
 #define SSC_CENTER		BIT(0)
@@ -123,6 +127,43 @@
 #define SSC_START		BIT(6)
 #define SSC_START_MUX		BIT(7)
 
+/* Dynamic Refresh Control Registers */
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL0		(0x014)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL1		(0x018)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL2		(0x01C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL3		(0x020)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL4		(0x024)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL5		(0x028)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL6		(0x02C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL7		(0x030)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL8		(0x034)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL9		(0x038)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL10		(0x03C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL11		(0x040)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL12		(0x044)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL13		(0x048)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL14		(0x04C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL15		(0x050)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL16		(0x054)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL17		(0x058)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL18		(0x05C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL19		(0x060)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL20		(0x064)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL21		(0x068)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL22		(0x06C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL23		(0x070)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL24		(0x074)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL25		(0x078)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL26		(0x07C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL27		(0x080)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL28		(0x084)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL29		(0x088)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL30		(0x08C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL31		(0x090)
+#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR	(0x094)
+#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR2	(0x098)
+
+#define DSI_PHY_TO_PLL_OFFSET	(0x600)
 enum {
 	DSI_PLL_0,
 	DSI_PLL_1,
@@ -644,6 +685,7 @@
 
 	rsc->vco_current_rate = rate;
 	rsc->vco_ref_clk_rate = vco->ref_clk_rate;
+	rsc->dfps_trigger = false;
 
 	rc = mdss_pll_resource_enable(rsc, true);
 	if (rc) {
@@ -674,6 +716,237 @@
 	return 0;
 }
 
+static int dsi_pll_read_stored_trim_codes(struct mdss_pll_resources *pll_res,
+					  unsigned long vco_clk_rate)
+{
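+	/*
+	 * Look up the PLL trim codes recorded for this VCO rate in the dfps
+	 * code table and cache them; bail out if the table is absent or the
+	 * rate has no valid entry.
+	 */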
+	int i;
+	bool found = false;
+
+	if (!pll_res->dfps)
+		return -EINVAL;
+
+	for (i = 0; i < pll_res->dfps->vco_rate_cnt; i++) {
+		struct dfps_codes_info *codes_info =
+			&pll_res->dfps->codes_dfps[i];
+
+		pr_debug("valid=%d vco_rate=%d, code %d %d %d\n",
+			codes_info->is_valid, codes_info->clk_rate,
+			codes_info->pll_codes.pll_codes_1,
+			codes_info->pll_codes.pll_codes_2,
+			codes_info->pll_codes.pll_codes_3);
+
+		if (vco_clk_rate != codes_info->clk_rate &&
+				codes_info->is_valid)
+			continue;
+
+		pll_res->cache_pll_trim_codes[0] =
+			codes_info->pll_codes.pll_codes_1;
+		pll_res->cache_pll_trim_codes[1] =
+			codes_info->pll_codes.pll_codes_2;
+		pll_res->cache_pll_trim_codes[2] =
+			codes_info->pll_codes.pll_codes_3;
+		found = true;
+		break;
+	}
+
+	if (!found)
+		return -EINVAL;
+
+	pr_debug("trim_code_0=0x%x trim_code_1=0x%x trim_code_2=0x%x\n",
+			pll_res->cache_pll_trim_codes[0],
+			pll_res->cache_pll_trim_codes[1],
+			pll_res->cache_pll_trim_codes[2]);
+
+	return 0;
+}
+
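+/*
+ * Program the dynamic-refresh register block. Each
+ * DSI_DYNAMIC_REFRESH_PLL_CTRLn entry carries a pair of register
+ * address/data writes, and upper_addr collects the high-order address bit
+ * for each slot. The live PHY/PLL registers are only read here; the queued
+ * writes are presumably replayed by hardware when a dynamic refresh (dfps)
+ * transition is triggered.
+ */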
+static void shadow_dsi_pll_dynamic_refresh_10nm(struct dsi_pll_10nm *pll,
+						struct mdss_pll_resources *rsc)
+{
+	u32 data;
+	u32 offset = DSI_PHY_TO_PLL_OFFSET;
+	u32 upper_addr = 0;
+	struct dsi_pll_regs *reg = &pll->reg_setup;
+
+	data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CLK_CFG1);
+	data &= ~BIT(5);
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL0,
+			   PHY_CMN_CLK_CFG1, PHY_CMN_PLL_CNTRL, data, 0);
+	upper_addr |= (upper_8_bit(PHY_CMN_CLK_CFG1) << 0);
+	upper_addr |= (upper_8_bit(PHY_CMN_PLL_CNTRL) << 1);
+
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL1,
+			   PHY_CMN_RBUF_CTRL,
+			   (PLL_DECIMAL_DIV_START_1 + offset),
+			   0, reg->decimal_div_start);
+	upper_addr |= (upper_8_bit(PHY_CMN_RBUF_CTRL) << 2);
+	upper_addr |= (upper_8_bit(PLL_DECIMAL_DIV_START_1 + offset) << 3);
+
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL2,
+			   (PLL_FRAC_DIV_START_LOW_1 + offset),
+			   (PLL_FRAC_DIV_START_MID_1 + offset),
+			   reg->frac_div_start_low, reg->frac_div_start_mid);
+	upper_addr |= (upper_8_bit(PLL_FRAC_DIV_START_LOW_1 + offset) << 4);
+	upper_addr |= (upper_8_bit(PLL_FRAC_DIV_START_MID_1 + offset) << 5);
+
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL3,
+			   (PLL_FRAC_DIV_START_HIGH_1 + offset),
+			   (PLL_PLL_PROP_GAIN_RATE_1 + offset),
+			   reg->frac_div_start_high, reg->pll_prop_gain_rate);
+	upper_addr |= (upper_8_bit(PLL_FRAC_DIV_START_HIGH_1 + offset) << 6);
+	upper_addr |= (upper_8_bit(PLL_PLL_PROP_GAIN_RATE_1 + offset) << 7);
+
+	data = MDSS_PLL_REG_R(rsc->pll_base, PLL_PLL_OUTDIV_RATE) & 0x03;
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL4,
+			   (PLL_PLL_OUTDIV_RATE + offset),
+			   (PLL_FREQ_TUNE_ACCUM_INIT_LOW + offset),
+			   data, 0);
+	upper_addr |= (upper_8_bit(PLL_PLL_OUTDIV_RATE + offset) << 8);
+	upper_addr |= (upper_8_bit(PLL_FREQ_TUNE_ACCUM_INIT_LOW + offset) << 9);
+
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL5,
+			   (PLL_FREQ_TUNE_ACCUM_INIT_MID + offset),
+			   (PLL_FREQ_TUNE_ACCUM_INIT_HIGH + offset),
+			   rsc->cache_pll_trim_codes[1],
+			   rsc->cache_pll_trim_codes[0]);
+	upper_addr |=
+		(upper_8_bit(PLL_FREQ_TUNE_ACCUM_INIT_MID + offset) << 10);
+	upper_addr |=
+		(upper_8_bit(PLL_FREQ_TUNE_ACCUM_INIT_HIGH + offset) << 11);
+
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL6,
+			   (PLL_FREQ_TUNE_ACCUM_INIT_MUX + offset),
+			   (PLL_PLL_BAND_SET_RATE_1 + offset),
+			   0x07, rsc->cache_pll_trim_codes[2]);
+	upper_addr |=
+		(upper_8_bit(PLL_FREQ_TUNE_ACCUM_INIT_MUX + offset) << 12);
+	upper_addr |= (upper_8_bit(PLL_PLL_BAND_SET_RATE_1 + offset) << 13);
+
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL7,
+			   (PLL_CALIBRATION_SETTINGS + offset),
+			   (PLL_BAND_SEL_CAL_SETTINGS + offset), 0x44, 0x3a);
+	upper_addr |= (upper_8_bit(PLL_CALIBRATION_SETTINGS + offset) << 14);
+	upper_addr |= (upper_8_bit(PLL_BAND_SEL_CAL_SETTINGS + offset) << 15);
+
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL8,
+			   (PLL_PLL_LOCKDET_RATE_1 + offset),
+			   (PLL_PLL_LOCK_DELAY + offset), 0x10, 0x06);
+	upper_addr |= (upper_8_bit(PLL_PLL_LOCKDET_RATE_1 + offset) << 16);
+	upper_addr |= (upper_8_bit(PLL_PLL_LOCK_DELAY + offset) << 17);
+
+	data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CLK_CFG0);
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL17,
+			   PHY_CMN_CTRL_2, PHY_CMN_CLK_CFG0, 0x40, data);
+	if (rsc->slave)
+		MDSS_DYN_PLL_REG_W(rsc->slave->dyn_pll_base,
+				   DSI_DYNAMIC_REFRESH_PLL_CTRL10,
+				   PHY_CMN_CLK_CFG0, PHY_CMN_CTRL_0,
+				   data, 0x7f);
+
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL18,
+			   PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+	/* Dummy register writes */
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL19,
+			   PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL20,
+			   PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL21,
+			   PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL22,
+			   PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL23,
+			   PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL24,
+			   PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL25,
+			   PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL26,
+			   PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL27,
+			   PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL28,
+			   PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL29,
+			   PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+
+	/* Registers to configure after PLL enable delay */
+	data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CLK_CFG1) | BIT(5);
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL30,
+			   PHY_CMN_CLK_CFG1, PHY_CMN_RBUF_CTRL, data, 0x01);
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL31,
+			   PHY_CMN_CLK_CFG1, PHY_CMN_CLK_CFG1, data, data);
+	if (rsc->slave) {
+		data = MDSS_PLL_REG_R(rsc->slave->phy_base, PHY_CMN_CLK_CFG1) |
+			BIT(5);
+		MDSS_DYN_PLL_REG_W(rsc->slave->dyn_pll_base,
+				   DSI_DYNAMIC_REFRESH_PLL_CTRL30,
+				   PHY_CMN_CLK_CFG1, PHY_CMN_RBUF_CTRL,
+				   data, 0x01);
+		MDSS_DYN_PLL_REG_W(rsc->slave->dyn_pll_base,
+				   DSI_DYNAMIC_REFRESH_PLL_CTRL31,
+				   PHY_CMN_CLK_CFG1, PHY_CMN_CLK_CFG1,
+				   data, data);
+	}
+
+	MDSS_PLL_REG_W(rsc->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR, upper_addr);
+	MDSS_PLL_REG_W(rsc->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR2, 0);
+	wmb(); /* commit register writes */
+}
+
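+/*
+ * Shadow variant of vco_10nm_set_rate used for dynamic refresh: it checks
+ * that trim codes are cached for the requested rate, computes the divider
+ * configuration, and programs the dynamic-refresh register block instead of
+ * the live PLL registers.
+ */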
+static int shadow_vco_10nm_set_rate(struct clk_hw *hw, unsigned long rate,
+			unsigned long parent_rate)
+{
+	int rc;
+	struct dsi_pll_10nm *pll;
+	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
+	struct mdss_pll_resources *rsc = vco->priv;
+
+	if (!rsc) {
+		pr_err("pll resource not found\n");
+		return -EINVAL;
+	}
+
+	pll = rsc->priv;
+	if (!pll) {
+		pr_err("pll configuration not found\n");
+		return -EINVAL;
+	}
+
+	rc = dsi_pll_read_stored_trim_codes(rsc, rate);
+	if (rc) {
+		pr_err("cannot find pll codes rate=%ld\n", rate);
+		return -EINVAL;
+	}
+	pr_debug("ndx=%d, rate=%lu\n", rsc->index, rate);
+
+	rsc->vco_current_rate = rate;
+	rsc->vco_ref_clk_rate = vco->ref_clk_rate;
+
+	rc = mdss_pll_resource_enable(rsc, true);
+	if (rc) {
+		pr_err("failed to enable mdss dsi pll(%d), rc=%d\n",
+		       rsc->index, rc);
+		return rc;
+	}
+
+	dsi_pll_setup_config(pll, rsc);
+
+	dsi_pll_calc_dec_frac(pll, rsc);
+
+	/* program dynamic refresh control registers */
+	shadow_dsi_pll_dynamic_refresh_10nm(pll, rsc);
+
+	/* update cached vco rate */
+	rsc->vco_cached_rate = rate;
+	rsc->dfps_trigger = true;
+
+	mdss_pll_resource_enable(rsc, false);
+
+	return 0;
+}
+
 static int dsi_pll_10nm_lock_status(struct mdss_pll_resources *pll)
 {
 	int rc;
@@ -739,7 +1012,7 @@
 	phy_reg_update_bits_sub(rsc, PHY_CMN_CLK_CFG1, 0x03, rsc->cached_cfg1);
 	if (rsc->slave)
 		phy_reg_update_bits_sub(rsc->slave, PHY_CMN_CLK_CFG1,
-				0x03, rsc->cached_cfg1);
+				0x03, rsc->slave->cached_cfg1);
 	wmb(); /* ensure dsiclk_sel is always programmed before pll start */
 
 	/* Start PLL */
@@ -789,6 +1062,7 @@
 	}
 
 	rsc->handoff_resources = false;
+	rsc->dfps_trigger = false;
 
 	pr_debug("stop PLL (%d)\n", rsc->index);
 
@@ -840,16 +1114,18 @@
 	/*
 	 * During unprepare in continuous splash use case we want driver
 	 * to pick all dividers instead of retaining bootloader configurations.
+	 * Also handle use cases where dynamic refresh is triggered before
+	 * the first suspend/resume.
 	 */
-	if (!pll->handoff_resources) {
+	if (!pll->handoff_resources || pll->dfps_trigger) {
 		pll->cached_cfg0 = MDSS_PLL_REG_R(pll->phy_base,
-							PHY_CMN_CLK_CFG0);
+						  PHY_CMN_CLK_CFG0);
 		pll->cached_outdiv = MDSS_PLL_REG_R(pll->pll_base,
-							PLL_PLL_OUTDIV_RATE);
+						    PLL_PLL_OUTDIV_RATE);
 		pr_debug("cfg0=%d,cfg1=%d, outdiv=%d\n", pll->cached_cfg0,
-					pll->cached_cfg1, pll->cached_outdiv);
+			 pll->cached_cfg1, pll->cached_outdiv);
 
-		pll->vco_cached_rate = clk_hw_get_rate(hw);
+		pll->vco_cached_rate = clk_get_rate(hw->clk);
 	}
 
 	/*
@@ -859,9 +1135,15 @@
 	 * does not change.For such usecases, we need to ensure that the cached
 	 * value is programmed prior to PLL being locked
 	 */
-	if (pll->handoff_resources)
+	if (pll->handoff_resources) {
 		pll->cached_cfg1 = MDSS_PLL_REG_R(pll->phy_base,
-							PHY_CMN_CLK_CFG1);
+						  PHY_CMN_CLK_CFG1);
+		if (pll->slave)
+			pll->slave->cached_cfg1 =
+				MDSS_PLL_REG_R(pll->slave->phy_base,
+					       PHY_CMN_CLK_CFG1);
+	}
+
 	dsi_pll_disable(vco);
 	mdss_pll_resource_enable(pll, false);
 }
@@ -889,7 +1171,7 @@
 	}
 
 	if ((pll->vco_cached_rate != 0) &&
-	    (pll->vco_cached_rate == clk_hw_get_rate(hw))) {
+	    (pll->vco_cached_rate == clk_get_rate(hw->clk))) {
 		rc = hw->init->ops->set_rate(hw, pll->vco_cached_rate,
 				pll->vco_cached_rate);
 		if (rc) {
@@ -902,6 +1184,9 @@
 			pll->cached_cfg1);
 		MDSS_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG0,
 					pll->cached_cfg0);
+		if (pll->slave)
+			MDSS_PLL_REG_W(pll->slave->phy_base, PHY_CMN_CLK_CFG0,
+				       pll->cached_cfg0);
 		MDSS_PLL_REG_W(pll->pll_base, PLL_PLL_OUTDIV_RATE,
 					pll->cached_outdiv);
 	}
@@ -1037,6 +1322,14 @@
 	reg_val &= ~0xF0;
 	reg_val |= (div << 4);
 	MDSS_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG0, reg_val);
+
+	/*
+	 * Cache the current parent index for cases where the parent is
+	 * not changing but the rate is changing. In that case the clock
+	 * framework won't call set_parent, and hence the dsiclk_sel bit
+	 * won't be programmed, e.g. in the dfps update use case.
+	 */
+	pll->cached_cfg0 = reg_val;
 }
 
 static int pixel_clk_set_div(void *context, unsigned int reg, unsigned int div)
@@ -1174,6 +1467,12 @@
 	.unprepare = vco_10nm_unprepare,
 };
 
+static const struct clk_ops clk_ops_shadow_vco_10nm = {
+	.recalc_rate = vco_10nm_recalc_rate,
+	.set_rate = shadow_vco_10nm_set_rate,
+	.round_rate = vco_10nm_round_rate,
+};
+
 static struct regmap_bus mdss_mux_regmap_bus = {
 	.reg_write = mdss_set_mux_sel,
 	.reg_read = mdss_get_mux_sel,
@@ -1248,6 +1547,19 @@
 	},
 };
 
+static struct dsi_pll_vco_clk dsi0pll_shadow_vco_clk = {
+	.ref_clk_rate = 19200000UL,
+	.min_rate = 1000000000UL,
+	.max_rate = 3500000000UL,
+	.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_shadow_vco_clk",
+			.parent_names = (const char *[]){"bi_tcxo"},
+			.num_parents = 1,
+			.ops = &clk_ops_shadow_vco_10nm,
+			.flags = CLK_GET_RATE_NOCACHE,
+	},
+};
+
 static struct dsi_pll_vco_clk dsi1pll_vco_clk = {
 	.ref_clk_rate = 19200000UL,
 	.min_rate = 1000000000UL,
@@ -1261,6 +1573,19 @@
 	},
 };
 
+static struct dsi_pll_vco_clk dsi1pll_shadow_vco_clk = {
+	.ref_clk_rate = 19200000UL,
+	.min_rate = 1000000000UL,
+	.max_rate = 3500000000UL,
+	.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_shadow_vco_clk",
+			.parent_names = (const char *[]){"bi_tcxo"},
+			.num_parents = 1,
+			.ops = &clk_ops_shadow_vco_10nm,
+			.flags = CLK_GET_RATE_NOCACHE,
+	},
+};
+
 static struct clk_regmap_div dsi0pll_pll_out_div = {
 	.reg = PLL_PLL_OUTDIV_RATE,
 	.shift = 0,
@@ -1277,6 +1602,23 @@
 	},
 };
 
+static struct clk_regmap_div dsi0pll_shadow_pll_out_div = {
+	.reg = PLL_PLL_OUTDIV_RATE,
+	.shift = 0,
+	.width = 2,
+	.flags = CLK_DIVIDER_POWER_OF_TWO,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_shadow_pll_out_div",
+			.parent_names = (const char *[]){
+				"dsi0pll_shadow_vco_clk"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
 static struct clk_regmap_div dsi1pll_pll_out_div = {
 	.reg = PLL_PLL_OUTDIV_RATE,
 	.shift = 0,
@@ -1293,6 +1635,23 @@
 	},
 };
 
+static struct clk_regmap_div dsi1pll_shadow_pll_out_div = {
+	.reg = PLL_PLL_OUTDIV_RATE,
+	.shift = 0,
+	.width = 2,
+	.flags = CLK_DIVIDER_POWER_OF_TWO,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_shadow_pll_out_div",
+			.parent_names = (const char *[]){
+				"dsi1pll_shadow_vco_clk"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
 static struct clk_regmap_div dsi0pll_bitclk_src = {
 	.shift = 0,
 	.width = 4,
@@ -1307,6 +1666,21 @@
 	},
 };
 
+static struct clk_regmap_div dsi0pll_shadow_bitclk_src = {
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_shadow_bitclk_src",
+			.parent_names = (const char *[]){
+				"dsi0pll_shadow_pll_out_div"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
 static struct clk_regmap_div dsi1pll_bitclk_src = {
 	.shift = 0,
 	.width = 4,
@@ -1321,6 +1695,21 @@
 	},
 };
 
+static struct clk_regmap_div dsi1pll_shadow_bitclk_src = {
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_shadow_bitclk_src",
+			.parent_names = (const char *[]){
+				"dsi1pll_shadow_pll_out_div"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
 static struct clk_fixed_factor dsi0pll_post_vco_div = {
 	.div = 4,
 	.mult = 1,
@@ -1328,7 +1717,19 @@
 		.name = "dsi0pll_post_vco_div",
 		.parent_names = (const char *[]){"dsi0pll_pll_out_div"},
 		.num_parents = 1,
-		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+		.flags = CLK_GET_RATE_NOCACHE,
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_fixed_factor dsi0pll_shadow_post_vco_div = {
+	.div = 4,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi0pll_shadow_post_vco_div",
+		.parent_names = (const char *[]){"dsi0pll_shadow_pll_out_div"},
+		.num_parents = 1,
+		.flags = CLK_GET_RATE_NOCACHE,
 		.ops = &clk_fixed_factor_ops,
 	},
 };
@@ -1340,7 +1741,19 @@
 		.name = "dsi1pll_post_vco_div",
 		.parent_names = (const char *[]){"dsi1pll_pll_out_div"},
 		.num_parents = 1,
-		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+		.flags = CLK_GET_RATE_NOCACHE,
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_fixed_factor dsi1pll_shadow_post_vco_div = {
+	.div = 4,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi1pll_shadow_post_vco_div",
+		.parent_names = (const char *[]){"dsi1pll_shadow_pll_out_div"},
+		.num_parents = 1,
+		.flags = CLK_GET_RATE_NOCACHE,
 		.ops = &clk_fixed_factor_ops,
 	},
 };
@@ -1357,6 +1770,18 @@
 	},
 };
 
+static struct clk_fixed_factor dsi0pll_shadow_byteclk_src = {
+	.div = 8,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi0pll_shadow_byteclk_src",
+		.parent_names = (const char *[]){"dsi0pll_shadow_bitclk_src"},
+		.num_parents = 1,
+		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
 static struct clk_fixed_factor dsi1pll_byteclk_src = {
 	.div = 8,
 	.mult = 1,
@@ -1369,6 +1794,18 @@
 	},
 };
 
+static struct clk_fixed_factor dsi1pll_shadow_byteclk_src = {
+	.div = 8,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi1pll_shadow_byteclk_src",
+		.parent_names = (const char *[]){"dsi1pll_shadow_bitclk_src"},
+		.num_parents = 1,
+		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
 static struct clk_fixed_factor dsi0pll_post_bit_div = {
 	.div = 2,
 	.mult = 1,
@@ -1381,6 +1818,18 @@
 	},
 };
 
+static struct clk_fixed_factor dsi0pll_shadow_post_bit_div = {
+	.div = 2,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi0pll_shadow_post_bit_div",
+		.parent_names = (const char *[]){"dsi0pll_shadow_bitclk_src"},
+		.num_parents = 1,
+		.flags = CLK_GET_RATE_NOCACHE,
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
 static struct clk_fixed_factor dsi1pll_post_bit_div = {
 	.div = 2,
 	.mult = 1,
@@ -1393,15 +1842,29 @@
 	},
 };
 
+static struct clk_fixed_factor dsi1pll_shadow_post_bit_div = {
+	.div = 2,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi1pll_shadow_post_bit_div",
+		.parent_names = (const char *[]){"dsi1pll_shadow_bitclk_src"},
+		.num_parents = 1,
+		.flags = CLK_GET_RATE_NOCACHE,
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
 static struct clk_regmap_mux dsi0pll_byteclk_mux = {
 	.shift = 0,
 	.width = 1,
 	.clkr = {
 		.hw.init = &(struct clk_init_data){
 			.name = "dsi0_phy_pll_out_byteclk",
-			.parent_names = (const char *[]){"dsi0pll_byteclk_src"},
-			.num_parents = 1,
-			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.parent_names = (const char *[]){"dsi0pll_byteclk_src",
+				"dsi0pll_shadow_byteclk_src"},
+			.num_parents = 2,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT |
+				  CLK_SET_RATE_NO_REPARENT),
 			.ops = &clk_regmap_mux_closest_ops,
 		},
 	},
@@ -1413,9 +1876,11 @@
 	.clkr = {
 		.hw.init = &(struct clk_init_data){
 			.name = "dsi1_phy_pll_out_byteclk",
-			.parent_names = (const char *[]){"dsi1pll_byteclk_src"},
-			.num_parents = 1,
-			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.parent_names = (const char *[]){"dsi1pll_byteclk_src",
+				"dsi1pll_shadow_byteclk_src"},
+			.num_parents = 2,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT |
+				  CLK_SET_RATE_NO_REPARENT),
 			.ops = &clk_regmap_mux_closest_ops,
 		},
 	},
@@ -1439,6 +1904,25 @@
 	},
 };
 
+static struct clk_regmap_mux dsi0pll_shadow_pclk_src_mux = {
+	.reg = PHY_CMN_CLK_CFG1,
+	.shift = 0,
+	.width = 2,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_shadow_pclk_src_mux",
+			.parent_names = (const char *[]){
+				"dsi0pll_shadow_bitclk_src",
+				"dsi0pll_shadow_post_bit_div",
+				"dsi0pll_shadow_pll_out_div",
+				"dsi0pll_shadow_post_vco_div"},
+			.num_parents = 4,
+			.flags = CLK_GET_RATE_NOCACHE,
+			.ops = &clk_regmap_mux_closest_ops,
+		},
+	},
+};
+
 static struct clk_regmap_mux dsi1pll_pclk_src_mux = {
 	.reg = PHY_CMN_CLK_CFG1,
 	.shift = 0,
@@ -1457,6 +1941,25 @@
 	},
 };
 
+static struct clk_regmap_mux dsi1pll_shadow_pclk_src_mux = {
+	.reg = PHY_CMN_CLK_CFG1,
+	.shift = 0,
+	.width = 2,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_shadow_pclk_src_mux",
+			.parent_names = (const char *[]){
+				"dsi1pll_shadow_bitclk_src",
+				"dsi1pll_shadow_post_bit_div",
+				"dsi1pll_shadow_pll_out_div",
+				"dsi1pll_shadow_post_vco_div"},
+			.num_parents = 4,
+			.flags = CLK_GET_RATE_NOCACHE,
+			.ops = &clk_regmap_mux_closest_ops,
+		},
+	},
+};
+
 static struct clk_regmap_div dsi0pll_pclk_src = {
 	.shift = 0,
 	.width = 4,
@@ -1472,6 +1975,21 @@
 	},
 };
 
+static struct clk_regmap_div dsi0pll_shadow_pclk_src = {
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_shadow_pclk_src",
+			.parent_names = (const char *[]){
+					"dsi0pll_shadow_pclk_src_mux"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
 static struct clk_regmap_div dsi1pll_pclk_src = {
 	.shift = 0,
 	.width = 4,
@@ -1487,15 +2005,32 @@
 	},
 };
 
+static struct clk_regmap_div dsi1pll_shadow_pclk_src = {
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_shadow_pclk_src",
+			.parent_names = (const char *[]){
+					"dsi1pll_shadow_pclk_src_mux"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
 static struct clk_regmap_mux dsi0pll_pclk_mux = {
 	.shift = 0,
 	.width = 1,
 	.clkr = {
 		.hw.init = &(struct clk_init_data){
 			.name = "dsi0_phy_pll_out_dsiclk",
-			.parent_names = (const char *[]){"dsi0pll_pclk_src"},
-			.num_parents = 1,
-			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.parent_names = (const char *[]){"dsi0pll_pclk_src",
+				"dsi0pll_shadow_pclk_src"},
+			.num_parents = 2,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT |
+				  CLK_SET_RATE_NO_REPARENT),
 			.ops = &clk_regmap_mux_closest_ops,
 		},
 	},
@@ -1507,9 +2042,11 @@
 	.clkr = {
 		.hw.init = &(struct clk_init_data){
 			.name = "dsi1_phy_pll_out_dsiclk",
-			.parent_names = (const char *[]){"dsi1pll_pclk_src"},
-			.num_parents = 1,
-			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.parent_names = (const char *[]){"dsi1pll_pclk_src",
+				"dsi1pll_shadow_pclk_src"},
+			.num_parents = 2,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT |
+				  CLK_SET_RATE_NO_REPARENT),
 			.ops = &clk_regmap_mux_closest_ops,
 		},
 	},
@@ -1526,6 +2063,14 @@
 	[PCLK_SRC_MUX_0_CLK] = &dsi0pll_pclk_src_mux.clkr.hw,
 	[PCLK_SRC_0_CLK] = &dsi0pll_pclk_src.clkr.hw,
 	[PCLK_MUX_0_CLK] = &dsi0pll_pclk_mux.clkr.hw,
+	[SHADOW_VCO_CLK_0] = &dsi0pll_shadow_vco_clk.hw,
+	[SHADOW_PLL_OUT_DIV_0_CLK] = &dsi0pll_shadow_pll_out_div.clkr.hw,
+	[SHADOW_BITCLK_SRC_0_CLK] = &dsi0pll_shadow_bitclk_src.clkr.hw,
+	[SHADOW_BYTECLK_SRC_0_CLK] = &dsi0pll_shadow_byteclk_src.hw,
+	[SHADOW_POST_BIT_DIV_0_CLK] = &dsi0pll_shadow_post_bit_div.hw,
+	[SHADOW_POST_VCO_DIV_0_CLK] = &dsi0pll_shadow_post_vco_div.hw,
+	[SHADOW_PCLK_SRC_MUX_0_CLK] = &dsi0pll_shadow_pclk_src_mux.clkr.hw,
+	[SHADOW_PCLK_SRC_0_CLK] = &dsi0pll_shadow_pclk_src.clkr.hw,
 	[VCO_CLK_1] = &dsi1pll_vco_clk.hw,
 	[PLL_OUT_DIV_1_CLK] = &dsi1pll_pll_out_div.clkr.hw,
 	[BITCLK_SRC_1_CLK] = &dsi1pll_bitclk_src.clkr.hw,
@@ -1536,6 +2081,14 @@
 	[PCLK_SRC_MUX_1_CLK] = &dsi1pll_pclk_src_mux.clkr.hw,
 	[PCLK_SRC_1_CLK] = &dsi1pll_pclk_src.clkr.hw,
 	[PCLK_MUX_1_CLK] = &dsi1pll_pclk_mux.clkr.hw,
+	[SHADOW_VCO_CLK_1] = &dsi1pll_shadow_vco_clk.hw,
+	[SHADOW_PLL_OUT_DIV_1_CLK] = &dsi1pll_shadow_pll_out_div.clkr.hw,
+	[SHADOW_BITCLK_SRC_1_CLK] = &dsi1pll_shadow_bitclk_src.clkr.hw,
+	[SHADOW_BYTECLK_SRC_1_CLK] = &dsi1pll_shadow_byteclk_src.hw,
+	[SHADOW_POST_BIT_DIV_1_CLK] = &dsi1pll_shadow_post_bit_div.hw,
+	[SHADOW_POST_VCO_DIV_1_CLK] = &dsi1pll_shadow_post_vco_div.hw,
+	[SHADOW_PCLK_SRC_MUX_1_CLK] = &dsi1pll_shadow_pclk_src_mux.clkr.hw,
+	[SHADOW_PCLK_SRC_1_CLK] = &dsi1pll_shadow_pclk_src.clkr.hw,
 };
 
 int dsi_pll_clock_register_10nm(struct platform_device *pdev,
@@ -1580,18 +2133,20 @@
 
 	/* Establish client data */
 	if (ndx == 0) {
-
 		rmap = devm_regmap_init(&pdev->dev, &pll_regmap_bus,
 				pll_res, &dsi_pll_10nm_config);
 		dsi0pll_pll_out_div.clkr.regmap = rmap;
+		dsi0pll_shadow_pll_out_div.clkr.regmap = rmap;
 
 		rmap = devm_regmap_init(&pdev->dev, &bitclk_src_regmap_bus,
 				pll_res, &dsi_pll_10nm_config);
 		dsi0pll_bitclk_src.clkr.regmap = rmap;
+		dsi0pll_shadow_bitclk_src.clkr.regmap = rmap;
 
 		rmap = devm_regmap_init(&pdev->dev, &pclk_src_regmap_bus,
 				pll_res, &dsi_pll_10nm_config);
 		dsi0pll_pclk_src.clkr.regmap = rmap;
+		dsi0pll_shadow_pclk_src.clkr.regmap = rmap;
 
 		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
 				pll_res, &dsi_pll_10nm_config);
@@ -1600,12 +2155,16 @@
 		rmap = devm_regmap_init(&pdev->dev, &pclk_src_mux_regmap_bus,
 				pll_res, &dsi_pll_10nm_config);
 		dsi0pll_pclk_src_mux.clkr.regmap = rmap;
+		dsi0pll_shadow_pclk_src_mux.clkr.regmap = rmap;
+
 		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
 				pll_res, &dsi_pll_10nm_config);
 		dsi0pll_byteclk_mux.clkr.regmap = rmap;
 
 		dsi0pll_vco_clk.priv = pll_res;
-		for (i = VCO_CLK_0; i <= PCLK_MUX_0_CLK; i++) {
+		dsi0pll_shadow_vco_clk.priv = pll_res;
+
+		for (i = VCO_CLK_0; i <= SHADOW_PCLK_SRC_0_CLK; i++) {
 			clk = devm_clk_register(&pdev->dev,
 						mdss_dsi_pllcc_10nm[i]);
 			if (IS_ERR(clk)) {
@@ -1620,20 +2179,21 @@
 
 		rc = of_clk_add_provider(pdev->dev.of_node,
 				of_clk_src_onecell_get, clk_data);
-
-
 	} else {
 		rmap = devm_regmap_init(&pdev->dev, &pll_regmap_bus,
 				pll_res, &dsi_pll_10nm_config);
 		dsi1pll_pll_out_div.clkr.regmap = rmap;
+		dsi1pll_shadow_pll_out_div.clkr.regmap = rmap;
 
 		rmap = devm_regmap_init(&pdev->dev, &bitclk_src_regmap_bus,
 				pll_res, &dsi_pll_10nm_config);
 		dsi1pll_bitclk_src.clkr.regmap = rmap;
+		dsi1pll_shadow_bitclk_src.clkr.regmap = rmap;
 
 		rmap = devm_regmap_init(&pdev->dev, &pclk_src_regmap_bus,
 				pll_res, &dsi_pll_10nm_config);
 		dsi1pll_pclk_src.clkr.regmap = rmap;
+		dsi1pll_shadow_pclk_src.clkr.regmap = rmap;
 
 		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
 				pll_res, &dsi_pll_10nm_config);
@@ -1642,12 +2202,16 @@
 		rmap = devm_regmap_init(&pdev->dev, &pclk_src_mux_regmap_bus,
 				pll_res, &dsi_pll_10nm_config);
 		dsi1pll_pclk_src_mux.clkr.regmap = rmap;
+		dsi1pll_shadow_pclk_src_mux.clkr.regmap = rmap;
+
 		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
 				pll_res, &dsi_pll_10nm_config);
 		dsi1pll_byteclk_mux.clkr.regmap = rmap;
-		dsi1pll_vco_clk.priv = pll_res;
 
-		for (i = VCO_CLK_1; i <= PCLK_MUX_1_CLK; i++) {
+		dsi1pll_vco_clk.priv = pll_res;
+		dsi1pll_shadow_vco_clk.priv = pll_res;
+
+		for (i = VCO_CLK_1; i <= SHADOW_PCLK_SRC_1_CLK; i++) {
 			clk = devm_clk_register(&pdev->dev,
 						mdss_dsi_pllcc_10nm[i]);
 			if (IS_ERR(clk)) {
diff --git a/drivers/clk/qcom/mdss/mdss-pll.h b/drivers/clk/qcom/mdss/mdss-pll.h
index 2f92270..e4b5184 100644
--- a/drivers/clk/qcom/mdss/mdss-pll.h
+++ b/drivers/clk/qcom/mdss/mdss-pll.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -35,6 +35,8 @@
 		writel_relaxed(PLL_CALC_DATA(addr0, addr1, data0, data1), \
 			(base) + (offset))
 
+#define upper_8_bit(x) ((((x) >> 2) & 0x100) >> 8)
+
 enum {
 	MDSS_DSI_PLL_10NM,
 	MDSS_DP_PLL_10NM,
@@ -45,30 +47,23 @@
 	MDSS_PLL_TARGET_8996,
 };
 
-#define DFPS_MAX_NUM_OF_FRAME_RATES 20
-
-struct dfps_panel_info {
-	uint32_t enabled;
-	uint32_t frame_rate_cnt;
-	uint32_t frame_rate[DFPS_MAX_NUM_OF_FRAME_RATES]; /* hz */
-};
+#define DFPS_MAX_NUM_OF_FRAME_RATES 16
 
 struct dfps_pll_codes {
 	uint32_t pll_codes_1;
 	uint32_t pll_codes_2;
+	uint32_t pll_codes_3;
 };
 
 struct dfps_codes_info {
 	uint32_t is_valid;
-	uint32_t frame_rate;	/* hz */
 	uint32_t clk_rate;	/* hz */
 	struct dfps_pll_codes pll_codes;
 };
 
 struct dfps_info {
-	struct dfps_panel_info panel_dfps;
+	uint32_t vco_rate_cnt;
 	struct dfps_codes_info codes_dfps[DFPS_MAX_NUM_OF_FRAME_RATES];
-	void *dfps_fb_base;
 };
 
 struct mdss_pll_resources {
@@ -139,7 +134,7 @@
 	/*
 	 * caching the pll trim codes in the case of dynamic refresh
 	 */
-	int		cache_pll_trim_codes[2];
+	int		cache_pll_trim_codes[3];
 
 	/*
 	 * for maintaining the status of saving trim codes
@@ -181,6 +176,11 @@
 	 */
 	struct dfps_info *dfps;
 
+	/*
+	 * for cases where a dfps trigger happens before the first
+	 * suspend/resume and handoff is not finished.
+	 */
+	bool dfps_trigger;
 };
 
 struct mdss_pll_vco_calc {
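
The upper_8_bit() macro added to mdss-pll.h above reduces, arithmetically, to extracting bit 10 of its argument (bit 8 of x >> 2). A minimal userspace check of that identity is sketched below; the sample values are made up for illustration and are not part of the patch.

#include <assert.h>
#include <stdio.h>

/* same definition as the macro added to mdss-pll.h */
#define upper_8_bit(x) ((((x) >> 2) & 0x100) >> 8)

int main(void)
{
	unsigned int samples[] = { 0x0, 0x3ff, 0x400, 0x500, 0x7ff, 0x800 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		unsigned int x = samples[i];

		/* equivalent to testing bit 10 of the argument */
		assert(upper_8_bit(x) == ((x >> 10) & 1));
		printf("upper_8_bit(0x%x) = %u\n", x, upper_8_bit(x));
	}
	return 0;
}
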
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 6e16d9f..8f9c8b6 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -2316,6 +2316,11 @@
 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
 			CPUFREQ_ADJUST, new_policy);
 
+	/* adjust if necessary - hardware incompatibility */
+	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+			CPUFREQ_INCOMPATIBLE, new_policy);
+
+
 	/*
 	 * verify the cpu speed can be set within this limit, which might be
 	 * different to the first one
diff --git a/drivers/crypto/msm/qcedev.c b/drivers/crypto/msm/qcedev.c
index c8d1158..d58144f 100644
--- a/drivers/crypto/msm/qcedev.c
+++ b/drivers/crypto/msm/qcedev.c
@@ -1239,8 +1239,7 @@
 				goto exit;
 			}
 
-			k_align_dst += creq->vbuf.dst[dst_i].len +
-						byteoffset;
+			k_align_dst += creq->vbuf.dst[dst_i].len;
 			creq->data_len -= creq->vbuf.dst[dst_i].len;
 			dst_i++;
 		} else {
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index 625ee50..decaed4 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -63,7 +63,7 @@
 	struct dcp_coherent_block	*coh;
 
 	struct completion		completion[DCP_MAX_CHANS];
-	struct mutex			mutex[DCP_MAX_CHANS];
+	spinlock_t			lock[DCP_MAX_CHANS];
 	struct task_struct		*thread[DCP_MAX_CHANS];
 	struct crypto_queue		queue[DCP_MAX_CHANS];
 };
@@ -349,13 +349,20 @@
 
 	int ret;
 
-	do {
-		__set_current_state(TASK_INTERRUPTIBLE);
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_INTERRUPTIBLE);
 
-		mutex_lock(&sdcp->mutex[chan]);
+		spin_lock(&sdcp->lock[chan]);
 		backlog = crypto_get_backlog(&sdcp->queue[chan]);
 		arq = crypto_dequeue_request(&sdcp->queue[chan]);
-		mutex_unlock(&sdcp->mutex[chan]);
+		spin_unlock(&sdcp->lock[chan]);
+
+		if (!backlog && !arq) {
+			schedule();
+			continue;
+		}
+
+		set_current_state(TASK_RUNNING);
 
 		if (backlog)
 			backlog->complete(backlog, -EINPROGRESS);
@@ -363,11 +370,8 @@
 		if (arq) {
 			ret = mxs_dcp_aes_block_crypt(arq);
 			arq->complete(arq, ret);
-			continue;
 		}
-
-		schedule();
-	} while (!kthread_should_stop());
+	}
 
 	return 0;
 }
@@ -409,9 +413,9 @@
 	rctx->ecb = ecb;
 	actx->chan = DCP_CHAN_CRYPTO;
 
-	mutex_lock(&sdcp->mutex[actx->chan]);
+	spin_lock(&sdcp->lock[actx->chan]);
 	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
-	mutex_unlock(&sdcp->mutex[actx->chan]);
+	spin_unlock(&sdcp->lock[actx->chan]);
 
 	wake_up_process(sdcp->thread[actx->chan]);
 
@@ -640,13 +644,20 @@
 	struct ahash_request *req;
 	int ret, fini;
 
-	do {
-		__set_current_state(TASK_INTERRUPTIBLE);
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_INTERRUPTIBLE);
 
-		mutex_lock(&sdcp->mutex[chan]);
+		spin_lock(&sdcp->lock[chan]);
 		backlog = crypto_get_backlog(&sdcp->queue[chan]);
 		arq = crypto_dequeue_request(&sdcp->queue[chan]);
-		mutex_unlock(&sdcp->mutex[chan]);
+		spin_unlock(&sdcp->lock[chan]);
+
+		if (!backlog && !arq) {
+			schedule();
+			continue;
+		}
+
+		set_current_state(TASK_RUNNING);
 
 		if (backlog)
 			backlog->complete(backlog, -EINPROGRESS);
@@ -658,12 +669,8 @@
 			ret = dcp_sha_req_to_buf(arq);
 			fini = rctx->fini;
 			arq->complete(arq, ret);
-			if (!fini)
-				continue;
 		}
-
-		schedule();
-	} while (!kthread_should_stop());
+	}
 
 	return 0;
 }
@@ -721,9 +728,9 @@
 		rctx->init = 1;
 	}
 
-	mutex_lock(&sdcp->mutex[actx->chan]);
+	spin_lock(&sdcp->lock[actx->chan]);
 	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
-	mutex_unlock(&sdcp->mutex[actx->chan]);
+	spin_unlock(&sdcp->lock[actx->chan]);
 
 	wake_up_process(sdcp->thread[actx->chan]);
 	mutex_unlock(&actx->mutex);
@@ -979,7 +986,7 @@
 	platform_set_drvdata(pdev, sdcp);
 
 	for (i = 0; i < DCP_MAX_CHANS; i++) {
-		mutex_init(&sdcp->mutex[i]);
+		spin_lock_init(&sdcp->lock[i]);
 		init_completion(&sdcp->completion[i]);
 		crypto_init_queue(&sdcp->queue[i], 50);
 	}
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
index 640c3fc..ad9d6fb 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
@@ -123,7 +123,8 @@
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 	switch (ent->device) {
 	case ADF_C3XXX_PCI_DEVICE_ID:
@@ -235,8 +236,7 @@
 	/* Find and map all the device's BARS */
 	i = 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
index 949d77b..0dd8d2d 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
@@ -125,7 +125,8 @@
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 	switch (ent->device) {
 	case ADF_C3XXXIOV_PCI_DEVICE_ID:
@@ -215,8 +216,7 @@
 	/* Find and map all the device's BARS */
 	i = 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);
diff --git a/drivers/crypto/qat/qat_c62x/adf_drv.c b/drivers/crypto/qat/qat_c62x/adf_drv.c
index 5b2d78a..dcdb94c 100644
--- a/drivers/crypto/qat/qat_c62x/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62x/adf_drv.c
@@ -123,7 +123,8 @@
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 	switch (ent->device) {
 	case ADF_C62X_PCI_DEVICE_ID:
@@ -235,8 +236,7 @@
 	/* Find and map all the device's BARS */
 	i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
index 7540ce1..cd9e634 100644
--- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
@@ -125,7 +125,8 @@
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 	switch (ent->device) {
 	case ADF_C62XIOV_PCI_DEVICE_ID:
@@ -215,8 +216,7 @@
 	/* Find and map all the device's BARS */
 	i = 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
index 4d2de28..3417443 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
@@ -123,7 +123,8 @@
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 	switch (ent->device) {
 	case ADF_DH895XCC_PCI_DEVICE_ID:
@@ -237,8 +238,7 @@
 	/* Find and map all the device's BARS */
 	i = 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
index 60df986..15de9cb 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
@@ -125,7 +125,8 @@
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 	switch (ent->device) {
 	case ADF_DH895XCCIOV_PCI_DEVICE_ID:
@@ -215,8 +216,7 @@
 	/* Find and map all the device's BARS */
 	i = 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);
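
Each of the QAT probe fixes above changes bar_mask from int to unsigned long so it can be handed to for_each_set_bit() directly rather than through a pointer cast. A small userspace sketch of the same walk over set bits follows; the loop is a stand-in for the kernel macro and the mask value is only an example.

#include <stdio.h>

int main(void)
{
	unsigned long bar_mask = 0x15UL; /* example: BARs 0, 2 and 4 populated */
	unsigned int bar_nr;

	/* stand-in for for_each_set_bit(bar_nr, &bar_mask, nbits) */
	for (bar_nr = 0; bar_nr < 8 * sizeof(bar_mask); bar_nr++) {
		if (bar_mask & (1UL << bar_nr))
			printf("map BAR %u\n", bar_nr);
	}
	return 0;
}
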
diff --git a/drivers/devfreq/governor_gpubw_mon.c b/drivers/devfreq/governor_gpubw_mon.c
index 9c24eef..f7bb7eb 100644
--- a/drivers/devfreq/governor_gpubw_mon.c
+++ b/drivers/devfreq/governor_gpubw_mon.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -224,10 +224,11 @@
 	case DEVFREQ_GOV_SUSPEND:
 		{
 			struct devfreq_msm_adreno_tz_data *priv = devfreq->data;
-
-			priv->bus.total_time = 0;
-			priv->bus.gpu_time = 0;
-			priv->bus.ram_time = 0;
+			if (priv) {
+				priv->bus.total_time = 0;
+				priv->bus.gpu_time = 0;
+				priv->bus.ram_time = 0;
+			}
 		}
 		break;
 	default:
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index bea71fb..7335a86 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -1059,14 +1059,14 @@
 
 	err = device_add(mci_pdev);
 	if (err < 0)
-		goto out_dev_free;
+		goto out_put_device;
 
 	edac_dbg(0, "device %s created\n", dev_name(mci_pdev));
 
 	return 0;
 
- out_dev_free:
-	kfree(mci_pdev);
+ out_put_device:
+	put_device(mci_pdev);
  out:
 	return err;
 }
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 8a68a5e..b609320 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -1177,15 +1177,14 @@
 
 	rc = device_add(pvt->addrmatch_dev);
 	if (rc < 0)
-		return rc;
+		goto err_put_addrmatch;
 
 	if (!pvt->is_registered) {
 		pvt->chancounts_dev = kzalloc(sizeof(*pvt->chancounts_dev),
 					      GFP_KERNEL);
 		if (!pvt->chancounts_dev) {
-			put_device(pvt->addrmatch_dev);
-			device_del(pvt->addrmatch_dev);
-			return -ENOMEM;
+			rc = -ENOMEM;
+			goto err_del_addrmatch;
 		}
 
 		pvt->chancounts_dev->type = &all_channel_counts_type;
@@ -1199,9 +1198,18 @@
 
 		rc = device_add(pvt->chancounts_dev);
 		if (rc < 0)
-			return rc;
+			goto err_put_chancounts;
 	}
 	return 0;
+
+err_put_chancounts:
+	put_device(pvt->chancounts_dev);
+err_del_addrmatch:
+	device_del(pvt->addrmatch_dev);
+err_put_addrmatch:
+	put_device(pvt->addrmatch_dev);
+
+	return rc;
 }
 
 static void i7core_delete_sysfs_devices(struct mem_ctl_info *mci)
@@ -1211,11 +1219,11 @@
 	edac_dbg(1, "\n");
 
 	if (!pvt->is_registered) {
-		put_device(pvt->chancounts_dev);
 		device_del(pvt->chancounts_dev);
+		put_device(pvt->chancounts_dev);
 	}
-	put_device(pvt->addrmatch_dev);
 	device_del(pvt->addrmatch_dev);
+	put_device(pvt->addrmatch_dev);
 }
 
 /****************************************************************************
diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c
index c0f718b..c85407a 100644
--- a/drivers/gpio/gpio-adp5588.c
+++ b/drivers/gpio/gpio-adp5588.c
@@ -41,6 +41,8 @@
 	uint8_t int_en[3];
 	uint8_t irq_mask[3];
 	uint8_t irq_stat[3];
+	uint8_t int_input_en[3];
+	uint8_t int_lvl_cached[3];
 };
 
 static int adp5588_gpio_read(struct i2c_client *client, u8 reg)
@@ -173,12 +175,28 @@
 	struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d);
 	int i;
 
-	for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++)
+	for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) {
+		if (dev->int_input_en[i]) {
+			mutex_lock(&dev->lock);
+			dev->dir[i] &= ~dev->int_input_en[i];
+			dev->int_input_en[i] = 0;
+			adp5588_gpio_write(dev->client, GPIO_DIR1 + i,
+					   dev->dir[i]);
+			mutex_unlock(&dev->lock);
+		}
+
+		if (dev->int_lvl_cached[i] != dev->int_lvl[i]) {
+			dev->int_lvl_cached[i] = dev->int_lvl[i];
+			adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + i,
+					   dev->int_lvl[i]);
+		}
+
 		if (dev->int_en[i] ^ dev->irq_mask[i]) {
 			dev->int_en[i] = dev->irq_mask[i];
 			adp5588_gpio_write(dev->client, GPIO_INT_EN1 + i,
 					   dev->int_en[i]);
 		}
+	}
 
 	mutex_unlock(&dev->irq_lock);
 }
@@ -221,9 +239,7 @@
 	else
 		return -EINVAL;
 
-	adp5588_gpio_direction_input(&dev->gpio_chip, gpio);
-	adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + bank,
-			   dev->int_lvl[bank]);
+	dev->int_input_en[bank] |= bit;
 
 	return 0;
 }
diff --git a/drivers/gpio/gpio-menz127.c b/drivers/gpio/gpio-menz127.c
index a1210e3..95061d2 100644
--- a/drivers/gpio/gpio-menz127.c
+++ b/drivers/gpio/gpio-menz127.c
@@ -56,9 +56,9 @@
 		rnd = fls(debounce) - 1;
 
 		if (rnd && (debounce & BIT(rnd - 1)))
-			debounce = round_up(debounce, MEN_Z127_DB_MIN_US);
+			debounce = roundup(debounce, MEN_Z127_DB_MIN_US);
 		else
-			debounce = round_down(debounce, MEN_Z127_DB_MIN_US);
+			debounce = rounddown(debounce, MEN_Z127_DB_MIN_US);
 
 		if (debounce > MEN_Z127_DB_MAX_US)
 			debounce = MEN_Z127_DB_MAX_US;
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 193f15d..aac8432 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -31,6 +31,7 @@
 	struct of_phandle_args *gpiospec = data;
 
 	return chip->gpiodev->dev.of_node == gpiospec->np &&
+				chip->of_xlate &&
 				chip->of_xlate(chip, gpiospec, NULL) >= 0;
 }
 
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index dd00764..2ec402a 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -471,7 +471,7 @@
 		if (ret)
 			goto out_free_descs;
 		lh->descs[i] = desc;
-		count = i;
+		count = i + 1;
 
 		if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
 			set_bit(FLAG_ACTIVE_LOW, &desc->flags);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 47951f4..d47c32a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -505,7 +505,7 @@
 
 	while (true) {
 		temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
-		if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT)
+		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
 			break;
 		if (timeout <= 0)
 			return -ETIME;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 564362e..c8a5cf5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -5551,6 +5551,11 @@
 	if (!(adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
 		return 0;
 
+	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG |
+				AMD_PG_SUPPORT_RLC_SMU_HS |
+				AMD_PG_SUPPORT_CP |
+				AMD_PG_SUPPORT_GFX_DMG))
+		adev->gfx.rlc.funcs->enter_safe_mode(adev);
 	switch (adev->asic_type) {
 	case CHIP_CARRIZO:
 	case CHIP_STONEY:
@@ -5586,7 +5591,11 @@
 	default:
 		break;
 	}
-
+	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG |
+				AMD_PG_SUPPORT_RLC_SMU_HS |
+				AMD_PG_SUPPORT_CP |
+				AMD_PG_SUPPORT_GFX_DMG))
+		adev->gfx.rlc.funcs->exit_safe_mode(adev);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index 71d2856..f61c489 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -1350,8 +1350,6 @@
 		return ret;
 	}
 
-	kv_update_current_ps(adev, adev->pm.dpm.boot_ps);
-
 	if (adev->irq.installed &&
 	    amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) {
 		ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX);
@@ -3086,7 +3084,7 @@
 	else
 		adev->pm.dpm_enabled = true;
 	mutex_unlock(&adev->pm.mutex);
-
+	amdgpu_pm_compute_clocks(adev);
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 3fa8320..4826bef 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -6959,7 +6959,6 @@
 
 	si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
 	si_thermal_start_thermal_controller(adev);
-	ni_update_current_ps(adev, boot_ps);
 
 	return 0;
 }
@@ -7836,7 +7835,7 @@
 	else
 		adev->pm.dpm_enabled = true;
 	mutex_unlock(&adev->pm.mutex);
-
+	amdgpu_pm_compute_clocks(adev);
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index a0a0daf..3664700 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -86,7 +86,7 @@
 
 	struct workqueue_struct *wq;
 	struct delayed_work hdcp_cb_work;
-	struct delayed_work connect_work;
+	struct work_struct connect_work;
 	struct work_struct attention_work;
 	struct mutex hdcp_mutex;
 	struct mutex session_lock;
@@ -456,7 +456,7 @@
 
 	/* if cable is already connected, send notification */
 	if (dp->usbpd->hpd_high)
-		queue_delayed_work(dp->wq, &dp->connect_work, HZ * 10);
+		queue_work(dp->wq, &dp->connect_work);
 	else
 		dp_display->post_open = NULL;
 }
@@ -464,15 +464,12 @@
 static int dp_display_send_hpd_notification(struct dp_display_private *dp,
 		bool hpd)
 {
-	u32 timeout_sec;
 	int ret = 0;
 
 	dp->dp_display.is_connected = hpd;
 
-	if  (dp_display_framework_ready(dp))
-		timeout_sec = 5;
-	else
-		timeout_sec = 10;
+	if (!dp_display_framework_ready(dp))
+		return ret;
 
 	dp->aux->state |= DP_STATE_NOTIFICATION_SENT;
 
@@ -480,7 +477,7 @@
 	dp_display_send_hpd_event(dp);
 
 	if (!wait_for_completion_timeout(&dp->notification_comp,
-						HZ * timeout_sec)) {
+						HZ * 5)) {
 		pr_warn("%s timeout\n", hpd ? "connect" : "disconnect");
 		ret = -EINVAL;
 	}
@@ -614,9 +611,9 @@
 
 	dp_display_host_init(dp);
 
-	/* check for hpd high and framework ready */
-	if  (dp->usbpd->hpd_high && dp_display_framework_ready(dp))
-		queue_delayed_work(dp->wq, &dp->connect_work, 0);
+	/* check for hpd high */
+	if  (dp->usbpd->hpd_high)
+		queue_work(dp->wq, &dp->connect_work);
 end:
 	return rc;
 }
@@ -695,7 +692,7 @@
 	dp->aux->abort(dp->aux);
 
 	/* wait for idle state */
-	cancel_delayed_work(&dp->connect_work);
+	cancel_work(&dp->connect_work);
 	cancel_work(&dp->attention_work);
 	flush_workqueue(dp->wq);
 
@@ -737,7 +734,7 @@
 			return;
 		}
 
-		queue_delayed_work(dp->wq, &dp->connect_work, 0);
+		queue_work(dp->wq, &dp->connect_work);
 		return;
 	}
 
@@ -783,18 +780,12 @@
 		return -ENODEV;
 	}
 
-	/* check if framework is ready */
-	if (!dp_display_framework_ready(dp)) {
-		pr_err("framework not ready\n");
-		return -ENODEV;
-	}
-
 	if (dp->usbpd->hpd_irq && dp->usbpd->hpd_high &&
 	    dp->power_on) {
 		dp->link->process_request(dp->link);
 		queue_work(dp->wq, &dp->attention_work);
 	} else if (dp->usbpd->hpd_high) {
-		queue_delayed_work(dp->wq, &dp->connect_work, 0);
+		queue_work(dp->wq, &dp->connect_work);
 	} else {
 		/* cancel any pending request */
 		atomic_set(&dp->aborted, 1);
@@ -802,7 +793,7 @@
 		dp->aux->abort(dp->aux);
 
 		/* wait for idle state */
-		cancel_delayed_work(&dp->connect_work);
+		cancel_work(&dp->connect_work);
 		cancel_work(&dp->attention_work);
 		flush_workqueue(dp->wq);
 
@@ -815,8 +806,7 @@
 
 static void dp_display_connect_work(struct work_struct *work)
 {
-	struct delayed_work *dw = to_delayed_work(work);
-	struct dp_display_private *dp = container_of(dw,
+	struct dp_display_private *dp = container_of(work,
 			struct dp_display_private, connect_work);
 
 	if (dp->dp_display.is_connected && dp_display_framework_ready(dp)) {
@@ -1376,7 +1366,7 @@
 	}
 
 	INIT_DELAYED_WORK(&dp->hdcp_cb_work, dp_display_hdcp_cb_work);
-	INIT_DELAYED_WORK(&dp->connect_work, dp_display_connect_work);
+	INIT_WORK(&dp->connect_work, dp_display_connect_work);
 	INIT_WORK(&dp->attention_work, dp_display_attention_work);
 
 	return 0;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
index 011e3b8..efb36bf 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
@@ -70,6 +70,8 @@
 	ctrl->ops.wait_for_cmd_mode_mdp_idle =
 		dsi_ctrl_hw_cmn_wait_for_cmd_mode_mdp_idle;
 	ctrl->ops.set_continuous_clk = dsi_ctrl_hw_cmn_set_continuous_clk;
+	ctrl->ops.wait4dynamic_refresh_done =
+		dsi_ctrl_hw_cmn_wait4dynamic_refresh_done;
 
 	switch (version) {
 	case DSI_CTRL_VERSION_1_4:
@@ -218,6 +220,14 @@
 	phy->ops.clamp_ctrl = dsi_phy_hw_v3_0_clamp_ctrl;
 	phy->ops.phy_lane_reset = dsi_phy_hw_v3_0_lane_reset;
 	phy->ops.toggle_resync_fifo = dsi_phy_hw_v3_0_toggle_resync_fifo;
+	phy->ops.dyn_refresh_ops.dyn_refresh_config =
+		dsi_phy_hw_v3_0_dyn_refresh_config;
+	phy->ops.dyn_refresh_ops.dyn_refresh_pipe_delay =
+		dsi_phy_hw_v3_0_dyn_refresh_pipe_delay;
+	phy->ops.dyn_refresh_ops.dyn_refresh_helper =
+		dsi_phy_hw_v3_0_dyn_refresh_helper;
+	phy->ops.dyn_refresh_ops.cache_phy_timings =
+		dsi_phy_hw_v3_0_cache_phy_timings;
 }
 
 /**
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
index c55bbe0..944dd52 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
@@ -66,15 +66,17 @@
  * @mode:       DSI mode information.
  * @host:       DSI host configuration.
  * @timing:     DSI phy lane configurations.
+ * @use_mode_bit_clk: Boolean to indicate whether to recalculate bit clk.
  *
  * This function setups the catalog information in the dsi_phy_hw object.
  *
  * return: error code for failure and 0 for success.
  */
 int dsi_phy_hw_calculate_timing_params(struct dsi_phy_hw *phy,
-					    struct dsi_mode_info *mode,
-	struct dsi_host_common_cfg *host,
-	struct dsi_phy_per_lane_cfgs *timing);
+				       struct dsi_mode_info *mode,
+				       struct dsi_host_common_cfg *host,
+				       struct dsi_phy_per_lane_cfgs *timing,
+				       bool use_mode_bit_clk);
 
 /* Definitions for 14nm PHY hardware driver */
 void dsi_phy_hw_v2_0_regulator_enable(struct dsi_phy_hw *phy,
@@ -226,4 +228,14 @@
 
 void dsi_ctrl_hw_cmn_set_continuous_clk(struct dsi_ctrl_hw *ctrl, bool enable);
 
+/* dynamic refresh specific functions */
+void dsi_phy_hw_v3_0_dyn_refresh_helper(struct dsi_phy_hw *phy, u32 offset);
+void dsi_phy_hw_v3_0_dyn_refresh_config(struct dsi_phy_hw *phy,
+				struct dsi_phy_cfg *cfg, bool is_master);
+void dsi_phy_hw_v3_0_dyn_refresh_pipe_delay(struct dsi_phy_hw *phy,
+					    struct dsi_dyn_clk_delay *delay);
+
+int dsi_ctrl_hw_cmn_wait4dynamic_refresh_done(struct dsi_ctrl_hw *ctrl);
+int dsi_phy_hw_v3_0_cache_phy_timings(struct dsi_phy_per_lane_cfgs *timings,
+				      u32 *dst, u32 size);
 #endif /* _DSI_CATALOG_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_clk.h b/drivers/gpu/drm/msm/dsi-staging/dsi_clk.h
index bdc60d2..cdcb331 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_clk.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_clk.h
@@ -317,4 +317,18 @@
  */
 int dsi_clk_update_parent(struct dsi_clk_link_set *parent,
 			  struct dsi_clk_link_set *child);
+
+/**
+ * dsi_clk_prepare_enable() - prepare and enable dsi src clocks
+ * @clk:       list of src clocks.
+ *
+ * @return:	Zero on success and error code on failure
+ */
+int dsi_clk_prepare_enable(struct dsi_clk_link_set *clk);
+
+/**
+ * dsi_clk_disable_unprepare() - disable and unprepare dsi src clocks
+ * @clk:       list of src clocks.
+ */
+void dsi_clk_disable_unprepare(struct dsi_clk_link_set *clk);
 #endif /* _DSI_CLK_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c b/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c
index fdaf283..9592603f 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c
@@ -113,8 +113,9 @@
 
 /**
  * dsi_clk_set_pixel_clk_rate() - set frequency for pixel clock
- * @clks:      DSI link clock information.
- * @pixel_clk: Pixel clock rate in KHz.
+ * @clks:	DSI link clock information.
+ * @pixel_clk:	Pixel clock rate in KHz.
+ * @index:	Index of the DSI controller.
  *
  * return: error code in case of failure or 0 for success.
  */
@@ -136,9 +137,9 @@
 
 /**
  * dsi_clk_set_byte_clk_rate() - set frequency for byte clock
- * @client:       DSI clock client pointer.
- * @byte_clk: Pixel clock rate in Hz.
- * @index:      Index of the DSI controller.
+ * @client:	DSI clock client pointer.
+ * @byte_clk:	Byte clock rate in Hz.
+ * @index:	Index of the DSI controller.
  * return: error code in case of failure or 0 for success.
  */
 int dsi_clk_set_byte_clk_rate(void *client, u64 byte_clk, u32 index)
@@ -146,6 +147,7 @@
 	int rc = 0;
 	struct dsi_clk_client_info *c = client;
 	struct dsi_clk_mngr *mngr;
+	u64 byte_intf_rate;
 
 	mngr = c->mngr;
 	rc = clk_set_rate(mngr->link_clks[index].hs_clks.byte_clk, byte_clk);
@@ -154,8 +156,16 @@
 	else
 		mngr->link_clks[index].freq.byte_clk_rate = byte_clk;
 
-	return rc;
+	if (mngr->link_clks[index].hs_clks.byte_intf_clk) {
+		byte_intf_rate = mngr->link_clks[index].freq.byte_clk_rate / 2;
+		rc = clk_set_rate(mngr->link_clks[index].hs_clks.byte_intf_clk,
+				  byte_intf_rate);
+		if (rc)
+			pr_err("failed to set clk rate for byte intf clk=%d\n",
+			       rc);
+	}
 
+	return rc;
 }
 
 /**
@@ -183,6 +193,41 @@
 	return rc;
 }
 
+/**
+ * dsi_clk_prepare_enable() - prepare and enable dsi src clocks
+ * @clk:       list of src clocks.
+ *
+ * @return:	Zero on success and error code on failure.
+ */
+int dsi_clk_prepare_enable(struct dsi_clk_link_set *clk)
+{
+	int rc;
+
+	rc = clk_prepare_enable(clk->byte_clk);
+	if (rc) {
+		pr_err("failed to enable byte src clk %d\n", rc);
+		return rc;
+	}
+
+	rc = clk_prepare_enable(clk->pixel_clk);
+	if (rc) {
+		pr_err("failed to enable pixel src clk %d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+/**
+ * dsi_clk_disable_unprepare() - disable and unprepare dsi src clocks
+ * @clk:       list of src clocks.
+ */
+void dsi_clk_disable_unprepare(struct dsi_clk_link_set *clk)
+{
+	clk_disable_unprepare(clk->pixel_clk);
+	clk_disable_unprepare(clk->byte_clk);
+}
+
 int dsi_core_clk_start(struct dsi_core_clks *c_clks)
 {
 	int rc = 0;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
index 31c3b1a..378ef4c 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
@@ -2736,7 +2736,12 @@
 		goto error;
 	}
 
-	if (!(flags & (DSI_MODE_FLAG_SEAMLESS | DSI_MODE_FLAG_VRR))) {
+	if (!(flags & (DSI_MODE_FLAG_SEAMLESS | DSI_MODE_FLAG_VRR |
+		       DSI_MODE_FLAG_DYN_CLK))) {
+		/*
+		 * for dynamic clk swith case link frequence would
+		 * be updated dsi_display_dynamic_clk_switch().
+		 */
 		rc = dsi_ctrl_update_link_freqs(ctrl, config, clk_handle);
 		if (rc) {
 			pr_err("[%s] failed to update link frequencies, rc=%d\n",
@@ -3455,6 +3460,27 @@
 }
 
 /**
+ * dsi_ctrl_wait4dynamic_refresh_done() - Poll for dynamic refresh
+ *				done interrupt.
+ * @ctrl:                  DSI controller handle.
+ */
+int dsi_ctrl_wait4dynamic_refresh_done(struct dsi_ctrl *ctrl)
+{
+	int rc = 0;
+
+	if (!ctrl)
+		return 0;
+
+	mutex_lock(&ctrl->ctrl_lock);
+
+	if (ctrl->hw.ops.wait4dynamic_refresh_done)
+		rc = ctrl->hw.ops.wait4dynamic_refresh_done(&ctrl->hw);
+
+	mutex_unlock(&ctrl->ctrl_lock);
+	return rc;
+}
+
+/**
  * dsi_ctrl_drv_register() - register platform driver for dsi controller
  */
 void dsi_ctrl_drv_register(void)
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
index 61c6116..47009bf 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
@@ -783,4 +783,11 @@
  * @enable:			   variable to control continuous clock.
  */
 void dsi_ctrl_set_continuous_clk(struct dsi_ctrl *dsi_ctrl, bool enable);
+
+/**
+ * dsi_ctrl_wait4dynamic_refresh_done() - Poll for dynamic refresh done
+ *					interrupt.
+ * @ctrl:                          DSI controller handle.
+ */
+int dsi_ctrl_wait4dynamic_refresh_done(struct dsi_ctrl *ctrl);
 #endif /* _DSI_CTRL_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
index 348ef36..f34cb10 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
@@ -810,6 +810,12 @@
 	 * @enable:	  Bool to control continuous clock request.
 	 */
 	void (*set_continuous_clk)(struct dsi_ctrl_hw *ctrl, bool enable);
+
+	/**
+	 * hw.ops.wait4dynamic_refresh_done() - Wait for dynamic refresh done
+	 * @ctrl:         Pointer to the controller host hardware.
+	 */
+	int (*wait4dynamic_refresh_done)(struct dsi_ctrl_hw *ctrl);
 };
 
 /*
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
index 7c58c43..7139a51 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
@@ -1443,6 +1443,13 @@
 			reg &= ~(0x7 << 23);
 	}
 
+	if (idx & BIT(DSI_PLL_UNLOCK_ERR)) {
+		if (en)
+			reg |= BIT(28);
+		else
+			reg &= ~BIT(28);
+	}
+
 	DSI_W32(ctrl, 0x10c, reg);
 	wmb(); /* ensure error is masked */
 }
@@ -1509,3 +1516,25 @@
 	DSI_W32(ctrl, DSI_LANE_CTRL, reg);
 	wmb(); /* make sure request is set */
 }
+
+int dsi_ctrl_hw_cmn_wait4dynamic_refresh_done(struct dsi_ctrl_hw *ctrl)
+{
+	int rc;
+	u32 const sleep_us = 1000;
+	u32 const timeout_us = 84000; /* approximately 5 vsyncs */
+	u32 reg = 0, dyn_refresh_done = BIT(28);
+
+	rc = readl_poll_timeout(ctrl->base + DSI_INT_CTRL, reg,
+				(reg & dyn_refresh_done), sleep_us, timeout_us);
+	if (rc) {
+		pr_err("wait4dynamic refresh timed out %d\n", rc);
+		return rc;
+	}
+
+	/* ack dynamic refresh done status */
+	reg = DSI_R32(ctrl, DSI_INT_CTRL);
+	reg |= dyn_refresh_done;
+	DSI_W32(ctrl, DSI_INT_CTRL, reg);
+
+	return 0;
+}
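
dsi_ctrl_hw_cmn_wait4dynamic_refresh_done() above leans on readl_poll_timeout() to re-read DSI_INT_CTRL until bit 28 reports dynamic refresh done or roughly five vsyncs elapse, then acks the status. The userspace sketch below mirrors only that poll-until-done-or-timeout shape; fake_read_status() is a hypothetical stand-in for the register read and is not part of the driver.

#include <stdio.h>
#include <unistd.h>

/* hypothetical stand-in for DSI_R32(ctrl, DSI_INT_CTRL) */
static unsigned int fake_read_status(int iteration)
{
	return (iteration >= 3) ? (1U << 28) : 0; /* reports "done" after a few polls */
}

int main(void)
{
	const unsigned int sleep_us = 1000;
	const unsigned int timeout_us = 84000; /* ~5 vsyncs, as in the patch */
	const unsigned int dyn_refresh_done = 1U << 28;
	unsigned int elapsed_us = 0, reg;
	int i = 0;

	/* same shape as readl_poll_timeout(): re-read until the bit is set
	 * or the timeout budget is exhausted
	 */
	while (!((reg = fake_read_status(i++)) & dyn_refresh_done)) {
		if (elapsed_us >= timeout_us) {
			fprintf(stderr, "dynamic refresh done poll timed out\n");
			return 1;
		}
		usleep(sleep_us);
		elapsed_us += sleep_us;
	}

	printf("dynamic refresh done, reg=0x%x after ~%u us\n", reg, elapsed_us);
	return 0;
}
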
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_reg.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_reg.h
index 39ac021..0ee8b39 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_reg.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_reg.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -138,44 +138,7 @@
 #define DSI_SCRATCH_REGISTER_1                     (0x01F8)
 #define DSI_SCRATCH_REGISTER_2                     (0x01FC)
 #define DSI_DYNAMIC_REFRESH_CTRL                   (0x0200)
-#define DSI_DYNAMIC_REFRESH_PIPE_DELAY             (0x0204)
-#define DSI_DYNAMIC_REFRESH_PIPE_DELAY2            (0x0208)
-#define DSI_DYNAMIC_REFRESH_PLL_DELAY              (0x020C)
 #define DSI_DYNAMIC_REFRESH_STATUS                 (0x0210)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL0              (0x0214)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL1              (0x0218)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL2              (0x021C)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL3              (0x0220)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL4              (0x0224)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL5              (0x0228)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL6              (0x022C)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL7              (0x0230)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL8              (0x0234)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL9              (0x0238)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL10             (0x023C)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL11             (0x0240)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL12             (0x0244)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL13             (0x0248)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL14             (0x024C)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL15             (0x0250)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL16             (0x0254)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL17             (0x0258)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL18             (0x025C)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL19             (0x0260)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL20             (0x0264)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL21             (0x0268)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL22             (0x026C)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL23             (0x0270)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL24             (0x0274)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL25             (0x0278)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL26             (0x027C)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL27             (0x0280)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL28             (0x0284)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL29             (0x0288)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL30             (0x028C)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL31             (0x0290)
-#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR         (0x0294)
-#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR2        (0x0298)
 #define DSI_VIDEO_COMPRESSION_MODE_CTRL            (0x02A0)
 #define DSI_VIDEO_COMPRESSION_MODE_CTRL2           (0x02A4)
 #define DSI_COMMAND_COMPRESSION_MODE_CTRL          (0x02A8)
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
index 3b2ef70..a6ada73 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
@@ -79,6 +79,7 @@
  * @DSI_MODE_FLAG_DMS: Seamless transition is dynamic mode switch
  * @DSI_MODE_FLAG_VRR: Seamless transition is DynamicFPS.
  *                     New timing values are sent from DAL.
+ * @DSI_MODE_FLAG_DYN_CLK: Seamless transition is dynamic clock change
  */
 enum dsi_mode_flags {
 	DSI_MODE_FLAG_SEAMLESS			= BIT(0),
@@ -86,6 +87,7 @@
 	DSI_MODE_FLAG_VBLANK_PRE_MODESET	= BIT(2),
 	DSI_MODE_FLAG_DMS			= BIT(3),
 	DSI_MODE_FLAG_VRR			= BIT(4),
+	DSI_MODE_FLAG_DYN_CLK			= BIT(5),
 };
 
 /**
@@ -595,12 +597,50 @@
  * @DSI_FIFO_OVERFLOW:     DSI FIFO Overflow error
  * @DSI_FIFO_UNDERFLOW:    DSI FIFO Underflow error
  * @DSI_LP_Rx_TIMEOUT:     DSI LP/RX Timeout error
+ * @DSI_PLL_UNLOCK_ERR:	   DSI PLL unlock error
  */
 enum dsi_error_status {
 	DSI_FIFO_OVERFLOW = 1,
 	DSI_FIFO_UNDERFLOW,
 	DSI_LP_Rx_TIMEOUT,
+	DSI_PLL_UNLOCK_ERR,
 	DSI_ERR_INTR_ALL,
 };
 
+/* structure containing the delays required for dynamic clk */
+struct dsi_dyn_clk_delay {
+	u32 pipe_delay;
+	u32 pipe_delay2;
+	u32 pll_delay;
+};
+
+/* dynamic refresh control bits */
+enum dsi_dyn_clk_control_bits {
+	DYN_REFRESH_INTF_SEL = 1,
+	DYN_REFRESH_SYNC_MODE,
+	DYN_REFRESH_SW_TRIGGER,
+	DYN_REFRESH_SWI_CTRL,
+};
+
+/* convert dsi pixel format into bits per pixel */
+static inline int dsi_pixel_format_to_bpp(enum dsi_pixel_format fmt)
+{
+	switch (fmt) {
+	case DSI_PIXEL_FORMAT_RGB888:
+	case DSI_PIXEL_FORMAT_MAX:
+		return 24;
+	case DSI_PIXEL_FORMAT_RGB666:
+	case DSI_PIXEL_FORMAT_RGB666_LOOSE:
+		return 18;
+	case DSI_PIXEL_FORMAT_RGB565:
+		return 16;
+	case DSI_PIXEL_FORMAT_RGB111:
+		return 3;
+	case DSI_PIXEL_FORMAT_RGB332:
+		return 8;
+	case DSI_PIXEL_FORMAT_RGB444:
+		return 12;
+	}
+	return 24;
+}
 #endif /* _DSI_DEFS_H_ */
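
The dsi_pixel_format_to_bpp() helper above supplies the bits-per-pixel term that dsi_display_update_dsi_bitrate() uses later in this patch (bit rate = per-lane bit clock times lane count, pixel clock = bit rate / bpp, byte clock = per-lane bit rate / 8), and dsi_clk_manager.c additionally derives byte_intf_clk as half the byte clock. A standalone sketch of that arithmetic; the input numbers are illustrative only, not taken from any panel.

#include <stdio.h>

int main(void)
{
	/* example inputs; real values come from the panel configuration */
	unsigned long long bit_clk_rate_hz = 800000000ULL;	/* per-lane bit clock */
	unsigned int num_of_lanes = 4, bpp = 24;		/* 4 lanes, RGB888 */

	unsigned long long bit_rate = bit_clk_rate_hz * num_of_lanes;
	unsigned long long bit_rate_per_lane = bit_rate / num_of_lanes;
	unsigned long long pclk_rate = bit_rate / bpp;		/* pixel clock */
	unsigned long long byte_clk_rate = bit_rate_per_lane / 8;
	unsigned long long byte_intf_rate = byte_clk_rate / 2;	/* byte_intf_clk */

	printf("bit=%llu pclk=%llu byte=%llu byte_intf=%llu\n",
	       bit_rate, pclk_rate, byte_clk_rate, byte_intf_rate);
	return 0;
}
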
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index dbc94a1..f8170b2 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -1505,14 +1505,12 @@
 static void adjust_timing_by_ctrl_count(const struct dsi_display *display,
 					struct dsi_display_mode *mode)
 {
-	if (display->ctrl_count > 1) {
-		mode->timing.h_active /= display->ctrl_count;
-		mode->timing.h_front_porch /= display->ctrl_count;
-		mode->timing.h_sync_width /= display->ctrl_count;
-		mode->timing.h_back_porch /= display->ctrl_count;
-		mode->timing.h_skew /= display->ctrl_count;
-		mode->pixel_clk_khz /= display->ctrl_count;
-	}
+	mode->timing.h_active /= display->ctrl_count;
+	mode->timing.h_front_porch /= display->ctrl_count;
+	mode->timing.h_sync_width /= display->ctrl_count;
+	mode->timing.h_back_porch /= display->ctrl_count;
+	mode->timing.h_skew /= display->ctrl_count;
+	mode->pixel_clk_khz /= display->ctrl_count;
 }
 
 static int dsi_display_is_ulps_req_valid(struct dsi_display *display,
@@ -2220,7 +2218,7 @@
 	m_ctrl = &display->ctrl[display->clk_master_idx];
 
 	rc = dsi_ctrl_set_clock_source(m_ctrl->ctrl,
-		   &display->clock_info.src_clks);
+		   &display->clock_info.mux_clks);
 	if (rc) {
 		pr_err("[%s] failed to set source clocks for master, rc=%d\n",
 			   display->name, rc);
@@ -2234,7 +2232,7 @@
 			continue;
 
 		rc = dsi_ctrl_set_clock_source(ctrl->ctrl,
-			   &display->clock_info.src_clks);
+			   &display->clock_info.mux_clks);
 		if (rc) {
 			pr_err("[%s] failed to set source clocks, rc=%d\n",
 				   display->name, rc);
@@ -2957,13 +2955,37 @@
 	struct dsi_clk_link_set *src = &display->clock_info.src_clks;
 	struct dsi_clk_link_set *mux = &display->clock_info.mux_clks;
 	struct dsi_clk_link_set *shadow = &display->clock_info.shadow_clks;
+	struct dsi_dyn_clk_caps *dyn_clk_caps = &(display->panel->dyn_clk_caps);
+
+	mux->byte_clk = devm_clk_get(&display->pdev->dev, "mux_byte_clk");
+	if (IS_ERR_OR_NULL(mux->byte_clk)) {
+		rc = PTR_ERR(mux->byte_clk);
+		pr_err("failed to get mux_byte_clk, rc=%d\n", rc);
+		mux->byte_clk = NULL;
+		goto error;
+	};
+
+	mux->pixel_clk = devm_clk_get(&display->pdev->dev, "mux_pixel_clk");
+	if (IS_ERR_OR_NULL(mux->pixel_clk)) {
+		rc = PTR_ERR(mux->pixel_clk);
+		mux->pixel_clk = NULL;
+		pr_err("failed to get mux_pixel_clk, rc=%d\n", rc);
+		goto error;
+	};
 
 	src->byte_clk = devm_clk_get(&display->pdev->dev, "src_byte_clk");
 	if (IS_ERR_OR_NULL(src->byte_clk)) {
 		rc = PTR_ERR(src->byte_clk);
 		src->byte_clk = NULL;
 		pr_err("failed to get src_byte_clk, rc=%d\n", rc);
-		goto error;
+		/*
+		 * Skip getting rest of clocks since one failed. This is a
+		 * non-critical failure since these clocks are required only for
+		 * dynamic refresh use cases.
+		 */
+		rc = 0;
+		dyn_clk_caps->dyn_clk_support = false;
+		goto done;
 	}
 
 	src->pixel_clk = devm_clk_get(&display->pdev->dev, "src_pixel_clk");
@@ -2971,37 +2993,16 @@
 		rc = PTR_ERR(src->pixel_clk);
 		src->pixel_clk = NULL;
 		pr_err("failed to get src_pixel_clk, rc=%d\n", rc);
-		goto error;
+		/*
+		 * Skip getting rest of clocks since one failed. This is a
+		 * non-critical failure since these clocks are required only for
+		 * dynamic refresh use cases.
+		 */
+		rc = 0;
+		dyn_clk_caps->dyn_clk_support = false;
+		goto done;
 	}
 
-	mux->byte_clk = devm_clk_get(&display->pdev->dev, "mux_byte_clk");
-	if (IS_ERR_OR_NULL(mux->byte_clk)) {
-		rc = PTR_ERR(mux->byte_clk);
-		pr_debug("failed to get mux_byte_clk, rc=%d\n", rc);
-		mux->byte_clk = NULL;
-		/*
-		 * Skip getting rest of clocks since one failed. This is a
-		 * non-critical failure since these clocks are requied only for
-		 * dynamic refresh use cases.
-		 */
-		rc = 0;
-		goto done;
-	};
-
-	mux->pixel_clk = devm_clk_get(&display->pdev->dev, "mux_pixel_clk");
-	if (IS_ERR_OR_NULL(mux->pixel_clk)) {
-		rc = PTR_ERR(mux->pixel_clk);
-		mux->pixel_clk = NULL;
-		pr_debug("failed to get mux_pixel_clk, rc=%d\n", rc);
-		/*
-		 * Skip getting rest of clocks since one failed. This is a
-		 * non-critical failure since these clocks are requied only for
-		 * dynamic refresh use cases.
-		 */
-		rc = 0;
-		goto done;
-	};
-
 	shadow->byte_clk = devm_clk_get(&display->pdev->dev, "shadow_byte_clk");
 	if (IS_ERR_OR_NULL(shadow->byte_clk)) {
 		rc = PTR_ERR(shadow->byte_clk);
@@ -3013,6 +3014,7 @@
 		 * dynamic refresh use cases.
 		 */
 		rc = 0;
+		dyn_clk_caps->dyn_clk_support = false;
 		goto done;
 	};
 
@@ -3028,6 +3030,7 @@
 		 * dynamic refresh use cases.
 		 */
 		rc = 0;
+		dyn_clk_caps->dyn_clk_support = false;
 		goto done;
 	};
 
@@ -3722,6 +3725,305 @@
 	return true;
 }
 
+static int dsi_display_update_dsi_bitrate(struct dsi_display *display,
+					  u32 bit_clk_rate)
+{
+	int rc = 0;
+	int i;
+
+	pr_debug("%s:bit rate:%d\n", __func__, bit_clk_rate);
+	if (!display->panel) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	if (bit_clk_rate == 0) {
+		pr_err("Invalid bit clock rate\n");
+		return -EINVAL;
+	}
+
+	display->config.bit_clk_rate_hz = bit_clk_rate;
+
+	for (i = 0; i < display->ctrl_count; i++) {
+		struct dsi_display_ctrl *dsi_disp_ctrl = &display->ctrl[i];
+		struct dsi_ctrl *ctrl = dsi_disp_ctrl->ctrl;
+		u32 num_of_lanes = 0, bpp;
+		u64 bit_rate, pclk_rate, bit_rate_per_lane, byte_clk_rate;
+		struct dsi_host_common_cfg *host_cfg;
+
+		mutex_lock(&ctrl->ctrl_lock);
+
+		host_cfg = &display->panel->host_config;
+		if (host_cfg->data_lanes & DSI_DATA_LANE_0)
+			num_of_lanes++;
+		if (host_cfg->data_lanes & DSI_DATA_LANE_1)
+			num_of_lanes++;
+		if (host_cfg->data_lanes & DSI_DATA_LANE_2)
+			num_of_lanes++;
+		if (host_cfg->data_lanes & DSI_DATA_LANE_3)
+			num_of_lanes++;
+
+		if (num_of_lanes == 0) {
+			pr_err("Invalid lane count\n");
+			rc = -EINVAL;
+			goto error;
+		}
+
+		bpp = dsi_pixel_format_to_bpp(host_cfg->dst_format);
+
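+		/*
+		 * Derive the link clock rates from the requested bit clock:
+		 * the total bit rate scales with the active lane count, the
+		 * byte clock is one eighth of the per-lane bit rate and the
+		 * pixel clock is the total bit rate divided by bits per pixel.
+		 */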
+		bit_rate = display->config.bit_clk_rate_hz * num_of_lanes;
+		bit_rate_per_lane = bit_rate;
+		do_div(bit_rate_per_lane, num_of_lanes);
+		pclk_rate = bit_rate;
+		do_div(pclk_rate, bpp);
+		byte_clk_rate = bit_rate_per_lane;
+		do_div(byte_clk_rate, 8);
+		pr_debug("bit_clk_rate = %llu, bit_clk_rate_per_lane = %llu\n",
+			 bit_rate, bit_rate_per_lane);
+		pr_debug("byte_clk_rate = %llu, pclk_rate = %llu\n",
+			  byte_clk_rate, pclk_rate);
+
+		ctrl->clk_freq.byte_clk_rate = byte_clk_rate;
+		ctrl->clk_freq.pix_clk_rate = pclk_rate;
+		rc = dsi_clk_set_link_frequencies(display->dsi_clk_handle,
+			ctrl->clk_freq, ctrl->cell_index);
+		if (rc) {
+			pr_err("Failed to update link frequencies\n");
+			goto error;
+		}
+
+		ctrl->host_config.bit_clk_rate_hz = bit_clk_rate;
+error:
+		mutex_unlock(&ctrl->ctrl_lock);
+
+		/* TODO: recover ctrl->clk_freq in case of failure */
+		if (rc)
+			return rc;
+	}
+
+	return 0;
+}
+
+static void _dsi_display_calc_pipe_delay(struct dsi_display *display,
+				    struct dsi_dyn_clk_delay *delay,
+				    struct dsi_display_mode *mode)
+{
+	u32 esc_clk_rate_hz;
+	u32 pclk_to_esc_ratio, byte_to_esc_ratio, hr_bit_to_esc_ratio;
+	u32 hsync_period = 0;
+	struct dsi_display_ctrl *m_ctrl;
+	struct dsi_ctrl *dsi_ctrl;
+	struct dsi_phy_cfg *cfg;
+
+	m_ctrl = &display->ctrl[display->clk_master_idx];
+	dsi_ctrl = m_ctrl->ctrl;
+
+	cfg = &(m_ctrl->phy->cfg);
+
+	esc_clk_rate_hz = dsi_ctrl->clk_freq.esc_clk_rate * 1000;
+	pclk_to_esc_ratio = ((dsi_ctrl->clk_freq.pix_clk_rate * 1000) /
+			     esc_clk_rate_hz);
+	byte_to_esc_ratio = ((dsi_ctrl->clk_freq.byte_clk_rate * 1000) /
+			     esc_clk_rate_hz);
+	hr_bit_to_esc_ratio = ((dsi_ctrl->clk_freq.byte_clk_rate * 4 * 1000) /
+					esc_clk_rate_hz);
+
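+	/*
+	 * The ratios above express the pixel, byte and half-rate bit clocks
+	 * in esc clock cycles, so the pipe and PLL delays below can be
+	 * programmed in units of the esc clock.
+	 */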
+	hsync_period = DSI_H_TOTAL_DSC(&mode->timing);
+	delay->pipe_delay = (hsync_period + 1) / pclk_to_esc_ratio;
+	if (!display->panel->video_config.eof_bllp_lp11_en)
+		delay->pipe_delay += (17 / pclk_to_esc_ratio) +
+			((21 + (display->config.common_config.t_clk_pre + 1) +
+			  (display->config.common_config.t_clk_post + 1)) /
+			 byte_to_esc_ratio) +
+			((((cfg->timing.lane_v3[8] >> 1) + 1) +
+			((cfg->timing.lane_v3[6] >> 1) + 1) +
+			((cfg->timing.lane_v3[3] * 4) +
+			 (cfg->timing.lane_v3[5] >> 1) + 1) +
+			((cfg->timing.lane_v3[7] >> 1) + 1) +
+			((cfg->timing.lane_v3[1] >> 1) + 1) +
+			((cfg->timing.lane_v3[4] >> 1) + 1)) /
+			 hr_bit_to_esc_ratio);
+
+	delay->pipe_delay2 = 0;
+	if (display->panel->host_config.force_hs_clk_lane)
+		delay->pipe_delay2 = (6 / byte_to_esc_ratio) +
+			((((cfg->timing.lane_v3[1] >> 1) + 1) +
+			  ((cfg->timing.lane_v3[4] >> 1) + 1)) /
+			 hr_bit_to_esc_ratio);
+
+	/* 130 us pll delay recommended by h/w doc */
+	delay->pll_delay = ((130 * esc_clk_rate_hz) / 1000000) * 2;
+}
+
+static int _dsi_display_dyn_update_clks(struct dsi_display *display,
+					struct link_clk_freq *bkp_freq)
+{
+	int rc = 0, i;
+	struct dsi_display_ctrl *m_ctrl, *ctrl;
+
+	m_ctrl = &display->ctrl[display->clk_master_idx];
+
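+	/*
+	 * Dynamic clock switch sequence: keep the source clocks enabled,
+	 * re-parent the mux to the shadow clocks, program the new byte and
+	 * pixel rates, trigger dynamic refresh on the slave PHY(s) and then
+	 * the master, wait for completion and finally switch the mux back to
+	 * the source clocks.
+	 */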
+	dsi_clk_prepare_enable(&display->clock_info.src_clks);
+
+	rc = dsi_clk_update_parent(&display->clock_info.shadow_clks,
+			      &display->clock_info.mux_clks);
+	if (rc) {
+		pr_err("failed update mux parent to shadow\n");
+		goto exit;
+	}
+
+	for (i = 0; (i < display->ctrl_count) &&
+	     (i < MAX_DSI_CTRLS_PER_DISPLAY); i++) {
+		ctrl = &display->ctrl[i];
+		if (!ctrl->ctrl)
+			continue;
+		rc = dsi_clk_set_byte_clk_rate(display->dsi_clk_handle,
+				   ctrl->ctrl->clk_freq.byte_clk_rate, i);
+		if (rc) {
+			pr_err("failed to set byte rate for index:%d\n", i);
+			goto recover_byte_clk;
+		}
+		rc = dsi_clk_set_pixel_clk_rate(display->dsi_clk_handle,
+				   ctrl->ctrl->clk_freq.pix_clk_rate, i);
+		if (rc) {
+			pr_err("failed to set pix rate for index:%d\n", i);
+			goto recover_pix_clk;
+		}
+	}
+
+	for (i = 0; (i < display->ctrl_count) &&
+	     (i < MAX_DSI_CTRLS_PER_DISPLAY); i++) {
+		ctrl = &display->ctrl[i];
+		if (ctrl == m_ctrl)
+			continue;
+		dsi_phy_dynamic_refresh_trigger(ctrl->phy, false);
+	}
+	dsi_phy_dynamic_refresh_trigger(m_ctrl->phy, true);
+
+	/* wait for dynamic refresh done */
+	for (i = 0; (i < display->ctrl_count) &&
+	     (i < MAX_DSI_CTRLS_PER_DISPLAY); i++) {
+		ctrl = &display->ctrl[i];
+		rc = dsi_ctrl_wait4dynamic_refresh_done(ctrl->ctrl);
+		if (rc) {
+			pr_err("wait4dynamic refresh failed for dsi:%d\n", i);
+			goto recover_pix_clk;
+		} else {
+			pr_info("dynamic refresh done on dsi: %s\n",
+				i ? "slave" : "master");
+		}
+	}
+
+	for (i = 0; (i < display->ctrl_count) &&
+	     (i < MAX_DSI_CTRLS_PER_DISPLAY); i++) {
+		ctrl = &display->ctrl[i];
+		dsi_phy_dynamic_refresh_clear(ctrl->phy);
+	}
+
+	rc = dsi_clk_update_parent(&display->clock_info.src_clks,
+			      &display->clock_info.mux_clks);
+	if (rc)
+		pr_err("could not switch back to src clks %d\n", rc);
+
+	dsi_clk_disable_unprepare(&display->clock_info.src_clks);
+
+	return rc;
+
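+	/*
+	 * On failure restore the byte/pixel rates that were backed up before
+	 * the switch so the link keeps running at its previous frequency.
+	 */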
+recover_pix_clk:
+	for (i = 0; (i < display->ctrl_count) &&
+	     (i < MAX_DSI_CTRLS_PER_DISPLAY); i++) {
+		ctrl = &display->ctrl[i];
+		if (!ctrl->ctrl)
+			continue;
+		dsi_clk_set_pixel_clk_rate(display->dsi_clk_handle,
+					   bkp_freq->pix_clk_rate, i);
+	}
+
+recover_byte_clk:
+	for (i = 0; (i < display->ctrl_count) &&
+	     (i < MAX_DSI_CTRLS_PER_DISPLAY); i++) {
+		ctrl = &display->ctrl[i];
+		if (!ctrl->ctrl)
+			continue;
+		dsi_clk_set_byte_clk_rate(display->dsi_clk_handle,
+					  bkp_freq->byte_clk_rate, i);
+	}
+
+exit:
+	dsi_clk_disable_unprepare(&display->clock_info.src_clks);
+
+	return rc;
+}
+
+static int dsi_display_dynamic_clk_switch(struct dsi_display *display,
+					  struct dsi_display_mode *mode)
+{
+	int rc = 0, mask, i;
+	struct dsi_display_ctrl *m_ctrl, *ctrl;
+	struct dsi_dyn_clk_delay delay;
+	struct link_clk_freq bkp_freq;
+
+	dsi_panel_acquire_panel_lock(display->panel);
+
+	m_ctrl = &display->ctrl[display->clk_master_idx];
+
+	dsi_display_clk_ctrl(display->dsi_clk_handle, DSI_ALL_CLKS, DSI_CLK_ON);
+
+	/* mask PLL unlock, FIFO overflow and underflow errors */
+	mask = BIT(DSI_PLL_UNLOCK_ERR) | BIT(DSI_FIFO_UNDERFLOW) |
+		BIT(DSI_FIFO_OVERFLOW);
+	dsi_display_mask_ctrl_error_interrupts(display, mask, true);
+
+	/* update the phy timings based on new mode */
+	for (i = 0; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		dsi_phy_update_phy_timings(ctrl->phy, &display->config);
+	}
+
+	/* back up existing rates to handle failure case */
+	bkp_freq.byte_clk_rate = m_ctrl->ctrl->clk_freq.byte_clk_rate;
+	bkp_freq.pix_clk_rate = m_ctrl->ctrl->clk_freq.pix_clk_rate;
+	bkp_freq.esc_clk_rate = m_ctrl->ctrl->clk_freq.esc_clk_rate;
+
+	rc = dsi_display_update_dsi_bitrate(display, mode->timing.clk_rate_hz);
+	if (rc) {
+		pr_err("failed set link frequencies %d\n", rc);
+		goto exit;
+	}
+
+	/* calculate pipe delays */
+	_dsi_display_calc_pipe_delay(display, &delay, mode);
+
+	/* configure dynamic refresh ctrl registers */
+	for (i = 0; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		if (!ctrl->phy)
+			continue;
+		if (ctrl == m_ctrl)
+			dsi_phy_config_dynamic_refresh(ctrl->phy, &delay, true);
+		else
+			dsi_phy_config_dynamic_refresh(ctrl->phy, &delay,
+						       false);
+	}
+
+	rc = _dsi_display_dyn_update_clks(display, &bkp_freq);
+
+exit:
+	dsi_display_mask_ctrl_error_interrupts(display, mask, false);
+
+	dsi_display_clk_ctrl(display->dsi_clk_handle, DSI_ALL_CLKS,
+			     DSI_CLK_OFF);
+
+	/* store newly calculated phy timings in mode private info */
+	dsi_phy_dyn_refresh_cache_phy_timings(m_ctrl->phy,
+					      mode->priv_info->phy_timing_val,
+					      mode->priv_info->phy_timing_len);
+
+	dsi_panel_release_panel_lock(display->panel);
+
+	return rc;
+}
+
 static int dsi_display_dfps_update(struct dsi_display *display,
 				   struct dsi_display_mode *dsi_mode)
 {
@@ -3987,6 +4289,16 @@
 					display->name, rc);
 			goto error;
 		}
+	} else if (mode->dsi_mode_flags & DSI_MODE_FLAG_DYN_CLK) {
+		rc = dsi_display_dynamic_clk_switch(display, mode);
+		if (rc)
+			pr_err("dynamic clk change failed %d\n", rc);
+		/*
+		 * skip the rest of the operations since
+		 * dsi_display_dynamic_clk_switch() already takes
+		 * care of them.
+		 */
+		return rc;
 	}
 
 	for (i = 0; i < display->ctrl_count; i++) {
@@ -4222,84 +4534,6 @@
 	return rc;
 }
 
-static int dsi_display_request_update_dsi_bitrate(struct dsi_display *display,
-					u32 bit_clk_rate)
-{
-	int rc = 0;
-	int i;
-
-	pr_debug("%s:bit rate:%d\n", __func__, bit_clk_rate);
-	if (!display->panel) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	if (bit_clk_rate == 0) {
-		pr_err("Invalid bit clock rate\n");
-		return -EINVAL;
-	}
-
-	display->config.bit_clk_rate_hz = bit_clk_rate;
-
-	for (i = 0; i < display->ctrl_count; i++) {
-		struct dsi_display_ctrl *dsi_disp_ctrl = &display->ctrl[i];
-		struct dsi_ctrl *ctrl = dsi_disp_ctrl->ctrl;
-		u32 num_of_lanes = 0;
-		u32 bpp = 3;
-		u64 bit_rate, pclk_rate, bit_rate_per_lane, byte_clk_rate;
-		struct dsi_host_common_cfg *host_cfg;
-
-		mutex_lock(&ctrl->ctrl_lock);
-
-		host_cfg = &display->panel->host_config;
-		if (host_cfg->data_lanes & DSI_DATA_LANE_0)
-			num_of_lanes++;
-		if (host_cfg->data_lanes & DSI_DATA_LANE_1)
-			num_of_lanes++;
-		if (host_cfg->data_lanes & DSI_DATA_LANE_2)
-			num_of_lanes++;
-		if (host_cfg->data_lanes & DSI_DATA_LANE_3)
-			num_of_lanes++;
-
-		if (num_of_lanes == 0) {
-			pr_err("Invalid lane count\n");
-			rc = -EINVAL;
-			goto error;
-		}
-
-		bit_rate = display->config.bit_clk_rate_hz * num_of_lanes;
-		bit_rate_per_lane = bit_rate;
-		do_div(bit_rate_per_lane, num_of_lanes);
-		pclk_rate = bit_rate;
-		do_div(pclk_rate, (8 * bpp));
-		byte_clk_rate = bit_rate_per_lane;
-		do_div(byte_clk_rate, 8);
-		pr_debug("bit_clk_rate = %llu, bit_clk_rate_per_lane = %llu\n",
-			 bit_rate, bit_rate_per_lane);
-		pr_debug("byte_clk_rate = %llu, pclk_rate = %llu\n",
-			  byte_clk_rate, pclk_rate);
-
-		ctrl->clk_freq.byte_clk_rate = byte_clk_rate;
-		ctrl->clk_freq.pix_clk_rate = pclk_rate;
-		rc = dsi_clk_set_link_frequencies(display->dsi_clk_handle,
-			ctrl->clk_freq, ctrl->cell_index);
-		if (rc) {
-			pr_err("Failed to update link frequencies\n");
-			goto error;
-		}
-
-		ctrl->host_config.bit_clk_rate_hz = bit_clk_rate;
-error:
-		mutex_unlock(&ctrl->ctrl_lock);
-
-		/* TODO: recover ctrl->clk_freq in case of failure */
-		if (rc)
-			return rc;
-	}
-
-	return 0;
-}
-
 static ssize_t sysfs_dynamic_dsi_clk_read(struct device *dev,
 	struct device_attribute *attr, char *buf)
 {
@@ -4350,6 +4584,11 @@
 		return rc;
 	}
 
+	if (display->panel->panel_mode != DSI_OP_CMD_MODE) {
+		pr_err("only supported for command mode\n");
+		return -ENOTSUPP;
+	}
+
 	if (clk_rate <= 0) {
 		pr_err("%s: bitrate should be greater than 0\n", __func__);
 		return -EINVAL;
@@ -4365,7 +4604,7 @@
 	mutex_lock(&display->display_lock);
 
 	display->cached_clk_rate = clk_rate;
-	rc = dsi_display_request_update_dsi_bitrate(display, clk_rate);
+	rc = dsi_display_update_dsi_bitrate(display, clk_rate);
 	if (!rc) {
 		pr_info("%s: bit clk is ready to be configured to '%d'\n",
 			__func__, clk_rate);
@@ -5151,7 +5390,8 @@
 			u32 *count)
 {
 	struct dsi_dfps_capabilities dfps_caps;
-	int num_dfps_rates, rc = 0;
+	struct dsi_dyn_clk_caps *dyn_clk_caps;
+	int num_dfps_rates, num_bit_clks, rc = 0;
 
 	if (!display || !display->panel) {
 		pr_err("invalid display:%d panel:%d\n", display != NULL,
@@ -5168,12 +5408,16 @@
 		return rc;
 	}
 
-	num_dfps_rates = !dfps_caps.dfps_support ? 1 :
-			dfps_caps.max_refresh_rate -
-			dfps_caps.min_refresh_rate + 1;
+	num_dfps_rates = !dfps_caps.dfps_support ? 1 : dfps_caps.dfps_list_len;
 
-	/* Inflate num_of_modes by fps in dfps */
-	*count = display->panel->num_timing_nodes * num_dfps_rates;
+	dyn_clk_caps = &(display->panel->dyn_clk_caps);
+
+	num_bit_clks = !dyn_clk_caps->dyn_clk_support ? 1 :
+					dyn_clk_caps->bit_clk_list_len;
+
+	/* Inflate num_of_modes by fps and bit clks in dfps */
+	*count = display->panel->num_timing_nodes *
+				num_dfps_rates * num_bit_clks;
 
 	return 0;
 }
@@ -5196,6 +5440,73 @@
 	return 0;
 }
 
+static void _dsi_display_populate_bit_clks(struct dsi_display *display,
+					   int start, int end, u32 *mode_idx)
+{
+	struct dsi_dyn_clk_caps *dyn_clk_caps;
+	struct dsi_display_mode *src, *dst;
+	struct dsi_host_common_cfg *cfg;
+	int i, j, total_modes, bpp, lanes = 0;
+
+	if (!display || !mode_idx)
+		return;
+
+	dyn_clk_caps = &(display->panel->dyn_clk_caps);
+	if (!dyn_clk_caps->dyn_clk_support)
+		return;
+
+	cfg = &(display->panel->host_config);
+	bpp = dsi_pixel_format_to_bpp(cfg->dst_format);
+
+	if (cfg->data_lanes & DSI_DATA_LANE_0)
+		lanes++;
+	if (cfg->data_lanes & DSI_DATA_LANE_1)
+		lanes++;
+	if (cfg->data_lanes & DSI_DATA_LANE_2)
+		lanes++;
+	if (cfg->data_lanes & DSI_DATA_LANE_3)
+		lanes++;
+
+	dsi_display_get_mode_count_no_lock(display, &total_modes);
+
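+	/*
+	 * Assign the first supported bit clock to the base modes created for
+	 * this timing node, then replicate each of them once per remaining
+	 * bit clock so every timing exposes all supported link rates.
+	 */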
+	for (i = start; i < end; i++) {
+		src = &display->modes[i];
+		if (!src)
+			return;
+		/*
+		 * TODO: currently the first bit rate in the list is set as
+		 * the preferred rate, but ideally this should be based on
+		 * user or device tree preference.
+		 */
+		src->timing.clk_rate_hz = dyn_clk_caps->bit_clk_list[0];
+		src->pixel_clk_khz =
+			div_u64(src->timing.clk_rate_hz * lanes, bpp);
+		src->pixel_clk_khz /= 1000;
+		src->pixel_clk_khz *= display->ctrl_count;
+	}
+
+	for (i = 1; i < dyn_clk_caps->bit_clk_list_len; i++) {
+		if (*mode_idx >= total_modes)
+			return;
+		for (j = start; j < end; j++) {
+			src = &display->modes[j];
+			dst = &display->modes[*mode_idx];
+
+			if (!src || !dst) {
+				pr_err("invalid mode index\n");
+				return;
+			}
+			memcpy(dst, src, sizeof(struct dsi_display_mode));
+			dst->timing.clk_rate_hz = dyn_clk_caps->bit_clk_list[i];
+			dst->pixel_clk_khz =
+				div_u64(dst->timing.clk_rate_hz * lanes, bpp);
+			dst->pixel_clk_khz /= 1000;
+			dst->pixel_clk_khz *= display->ctrl_count;
+			(*mode_idx)++;
+		}
+	}
+}
+
 void dsi_display_put_mode(struct dsi_display *display,
 	struct dsi_display_mode *mode)
 {
@@ -5206,9 +5517,10 @@
 			  struct dsi_display_mode **out_modes)
 {
 	struct dsi_dfps_capabilities dfps_caps;
+	struct dsi_dyn_clk_caps *dyn_clk_caps;
 	u32 num_dfps_rates, panel_mode_count, total_mode_count;
 	u32 mode_idx, array_idx = 0;
-	int i, rc = -EINVAL;
+	int i, start, end, rc = -EINVAL;
 
 	if (!display || !out_modes) {
 		pr_err("Invalid params\n");
@@ -5240,9 +5552,9 @@
 		goto error;
 	}
 
-	num_dfps_rates = !dfps_caps.dfps_support ? 1 :
-			dfps_caps.max_refresh_rate -
-			dfps_caps.min_refresh_rate + 1;
+	dyn_clk_caps = &(display->panel->dyn_clk_caps);
+
+	num_dfps_rates = !dfps_caps.dfps_support ? 1 : dfps_caps.dfps_list_len;
 
 	panel_mode_count = display->panel->num_timing_nodes;
 
@@ -5263,14 +5575,14 @@
 			goto error;
 		}
 
-		if (display->ctrl_count > 1) { /* TODO: remove if */
-			panel_mode.timing.h_active *= display->ctrl_count;
-			panel_mode.timing.h_front_porch *= display->ctrl_count;
-			panel_mode.timing.h_sync_width *= display->ctrl_count;
-			panel_mode.timing.h_back_porch *= display->ctrl_count;
-			panel_mode.timing.h_skew *= display->ctrl_count;
-			panel_mode.pixel_clk_khz *= display->ctrl_count;
-		}
+		panel_mode.timing.h_active *= display->ctrl_count;
+		panel_mode.timing.h_front_porch *= display->ctrl_count;
+		panel_mode.timing.h_sync_width *= display->ctrl_count;
+		panel_mode.timing.h_back_porch *= display->ctrl_count;
+		panel_mode.timing.h_skew *= display->ctrl_count;
+		panel_mode.pixel_clk_khz *= display->ctrl_count;
+
+		start = array_idx;
 
 		for (i = 0; i < num_dfps_rates; i++) {
 			struct dsi_display_mode *sub_mode =
@@ -5284,24 +5596,24 @@
 			}
 
 			memcpy(sub_mode, &panel_mode, sizeof(panel_mode));
-
-			if (dfps_caps.dfps_support) {
-				curr_refresh_rate =
-					sub_mode->timing.refresh_rate;
-				sub_mode->timing.refresh_rate =
-					dfps_caps.min_refresh_rate +
-					(i % num_dfps_rates);
-
-				dsi_display_get_dfps_timing(display,
-					sub_mode, curr_refresh_rate);
-
-				sub_mode->pixel_clk_khz =
-					(DSI_H_TOTAL(&sub_mode->timing) *
-					DSI_V_TOTAL(&sub_mode->timing) *
-					sub_mode->timing.refresh_rate) / 1000;
-			}
 			array_idx++;
+
+			if (!dfps_caps.dfps_support)
+				continue;
+
+			curr_refresh_rate = sub_mode->timing.refresh_rate;
+			sub_mode->timing.refresh_rate = dfps_caps.dfps_list[i];
+
+			dsi_display_get_dfps_timing(display, sub_mode,
+						    curr_refresh_rate);
 		}
+
+		end = array_idx;
+		/*
+		 * if dynamic clk switch is supported then update all the bit
+		 * clk rates.
+		 */
+		_dsi_display_populate_bit_clks(display, start, end, &array_idx);
 	}
 
 exit:
@@ -5385,7 +5697,8 @@
 
 		if (cmp->timing.v_active == m->timing.v_active &&
 			cmp->timing.h_active == m->timing.h_active &&
-			cmp->timing.refresh_rate == m->timing.refresh_rate) {
+			cmp->timing.refresh_rate == m->timing.refresh_rate &&
+			cmp->pixel_clk_khz == m->pixel_clk_khz) {
 			*out_mode = m;
 			rc = 0;
 			break;
@@ -5394,9 +5707,10 @@
 	mutex_unlock(&display->display_lock);
 
 	if (!*out_mode) {
-		pr_err("[%s] failed to find mode for v_active %u h_active %u rate %u\n",
+		pr_err("[%s] failed to find mode for v_active %u h_active %u fps %u pclk %u\n",
 				display->name, cmp->timing.v_active,
-				cmp->timing.h_active, cmp->timing.refresh_rate);
+				cmp->timing.h_active, cmp->timing.refresh_rate,
+				cmp->pixel_clk_khz);
 		rc = -ENOENT;
 	}
 
@@ -5404,7 +5718,7 @@
 }
 
 /**
- * dsi_display_validate_mode_vrr() - Validate if varaible refresh case.
+ * dsi_display_validate_mode_change() - Validate variable refresh and dynamic clk cases.
  * @display:     DSI display handle.
  * @cur_dsi_mode:   Current DSI mode.
  * @mode:        Mode value structure to be validated.
@@ -5412,16 +5726,15 @@
  *               is change in fps but vactive and hactive are same.
  * Return: error code.
  */
-int dsi_display_validate_mode_vrr(struct dsi_display *display,
-			struct dsi_display_mode *cur_dsi_mode,
-			struct dsi_display_mode *mode)
+int dsi_display_validate_mode_change(struct dsi_display *display,
+			struct dsi_display_mode *cur_mode,
+			struct dsi_display_mode *adj_mode)
 {
 	int rc = 0;
-	struct dsi_display_mode adj_mode, cur_mode;
 	struct dsi_dfps_capabilities dfps_caps;
-	u32 curr_refresh_rate;
+	struct dsi_dyn_clk_caps *dyn_clk_caps;
 
-	if (!display || !mode) {
+	if (!display || !adj_mode) {
 		pr_err("Invalid params\n");
 		return -EINVAL;
 	}
@@ -5433,65 +5746,43 @@
 
 	mutex_lock(&display->display_lock);
 
-	adj_mode = *mode;
-	cur_mode = *cur_dsi_mode;
-
-	if ((cur_mode.timing.refresh_rate != adj_mode.timing.refresh_rate) &&
-		(cur_mode.timing.v_active == adj_mode.timing.v_active) &&
-		(cur_mode.timing.h_active == adj_mode.timing.h_active)) {
-
-		curr_refresh_rate = cur_mode.timing.refresh_rate;
-		rc = dsi_panel_get_dfps_caps(display->panel, &dfps_caps);
-		if (rc) {
-			pr_err("[%s] failed to get dfps caps from panel\n",
-					display->name);
-			goto error;
+	if ((cur_mode->timing.v_active == adj_mode->timing.v_active) &&
+	    (cur_mode->timing.h_active == adj_mode->timing.h_active)) {
+		/* dfps change use case */
+		if (cur_mode->timing.refresh_rate !=
+		    adj_mode->timing.refresh_rate) {
+			dsi_panel_get_dfps_caps(display->panel, &dfps_caps);
+			if (!dfps_caps.dfps_support) {
+				pr_err("invalid mode dfps not supported\n");
+				rc = -ENOTSUPP;
+				goto error;
+			}
+			pr_debug("Mode switch is seamless variable refresh\n");
+			adj_mode->dsi_mode_flags |= DSI_MODE_FLAG_VRR;
+			SDE_EVT32(cur_mode->timing.refresh_rate,
+				  adj_mode->timing.refresh_rate,
+				  cur_mode->timing.h_front_porch,
+				  adj_mode->timing.h_front_porch);
 		}
 
-		cur_mode.timing.refresh_rate =
-			adj_mode.timing.refresh_rate;
-
-		rc = dsi_display_get_dfps_timing(display,
-			&cur_mode, curr_refresh_rate);
-		if (rc) {
-			pr_err("[%s] seamless vrr not possible rc=%d\n",
-			display->name, rc);
-			goto error;
+		/* dynamic clk change use case */
+		if (cur_mode->pixel_clk_khz != adj_mode->pixel_clk_khz) {
+			dyn_clk_caps = &(display->panel->dyn_clk_caps);
+			if (!dyn_clk_caps->dyn_clk_support) {
+				pr_err("dyn clk change not supported\n");
+				rc = -ENOTSUPP;
+				goto error;
+			}
+			if (adj_mode->dsi_mode_flags & DSI_MODE_FLAG_VRR) {
+				pr_err("dfps and dyn clk not supported in same commit\n");
+				rc = -ENOTSUPP;
+				goto error;
+			}
+			pr_debug("dynamic clk change detected\n");
+			adj_mode->dsi_mode_flags |= DSI_MODE_FLAG_DYN_CLK;
+			SDE_EVT32(cur_mode->pixel_clk_khz,
+				  adj_mode->pixel_clk_khz);
 		}
-		switch (dfps_caps.type) {
-		/*
-		 * Ignore any round off factors in porch calculation.
-		 * Worse case is set to 5.
-		 */
-		case DSI_DFPS_IMMEDIATE_VFP:
-			if (abs(DSI_V_TOTAL(&cur_mode.timing) -
-				DSI_V_TOTAL(&adj_mode.timing)) > 5)
-				pr_err("Mismatch vfp fps:%d new:%d given:%d\n",
-				adj_mode.timing.refresh_rate,
-				cur_mode.timing.v_front_porch,
-				adj_mode.timing.v_front_porch);
-			break;
-
-		case DSI_DFPS_IMMEDIATE_HFP:
-			if (abs(DSI_H_TOTAL(&cur_mode.timing) -
-				DSI_H_TOTAL(&adj_mode.timing)) > 5)
-				pr_err("Mismatch hfp fps:%d new:%d given:%d\n",
-				adj_mode.timing.refresh_rate,
-				cur_mode.timing.h_front_porch,
-				adj_mode.timing.h_front_porch);
-			break;
-
-		default:
-			pr_err("Unsupported DFPS mode %d\n",
-				dfps_caps.type);
-			rc = -ENOTSUPP;
-		}
-
-		pr_debug("Mode switch is seamless variable refresh\n");
-		mode->dsi_mode_flags |= DSI_MODE_FLAG_VRR;
-		SDE_EVT32(curr_refresh_rate, adj_mode.timing.refresh_rate,
-				cur_mode.timing.h_front_porch,
-				adj_mode.timing.h_front_porch);
 	}
 
 error:
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
index 5612016..f65f0f5 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
@@ -398,13 +398,14 @@
 			      u32 flags);
 
 /**
- * dsi_display_validate_mode_vrr() - validates mode if variable refresh case
+ * dsi_display_validate_mode_change() - validates mode for the variable refresh
+ *				or dynamic clk change case
  * @display:             Handle to display.
  * @mode:                Mode to be validated..
  *
 * Return: 0 on success, error code on failure.
  */
-int dsi_display_validate_mode_vrr(struct dsi_display *display,
+int dsi_display_validate_mode_change(struct dsi_display *display,
 			struct dsi_display_mode *cur_dsi_mode,
 			struct dsi_display_mode *mode);
 
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
index 1278d59..68a7277 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
@@ -63,6 +63,8 @@
 		dsi_mode->dsi_mode_flags |= DSI_MODE_FLAG_DMS;
 	if (msm_is_mode_seamless_vrr(drm_mode))
 		dsi_mode->dsi_mode_flags |= DSI_MODE_FLAG_VRR;
+	if (msm_is_mode_seamless_dyn_clk(drm_mode))
+		dsi_mode->dsi_mode_flags |= DSI_MODE_FLAG_DYN_CLK;
 
 	dsi_mode->timing.h_sync_polarity =
 			!!(drm_mode->flags & DRM_MODE_FLAG_PHSYNC);
@@ -105,13 +107,18 @@
 		drm_mode->private_flags |= MSM_MODE_FLAG_SEAMLESS_DMS;
 	if (dsi_mode->dsi_mode_flags & DSI_MODE_FLAG_VRR)
 		drm_mode->private_flags |= MSM_MODE_FLAG_SEAMLESS_VRR;
+	if (dsi_mode->dsi_mode_flags & DSI_MODE_FLAG_DYN_CLK)
+		drm_mode->private_flags |= MSM_MODE_FLAG_SEAMLESS_DYN_CLK;
 
 	if (dsi_mode->timing.h_sync_polarity)
 		drm_mode->flags |= DRM_MODE_FLAG_PHSYNC;
 	if (dsi_mode->timing.v_sync_polarity)
 		drm_mode->flags |= DRM_MODE_FLAG_PVSYNC;
 
-	drm_mode_set_name(drm_mode);
+	/* set mode name */
+	snprintf(drm_mode->name, DRM_DISPLAY_MODE_LEN, "%dx%dx%dx%d",
+		 drm_mode->hdisplay, drm_mode->vdisplay, drm_mode->vrefresh,
+		 drm_mode->clock);
 }
 
 static int dsi_bridge_attach(struct drm_bridge *bridge)
@@ -156,7 +163,8 @@
 	}
 
 	if (c_bridge->dsi_mode.dsi_mode_flags &
-		(DSI_MODE_FLAG_SEAMLESS | DSI_MODE_FLAG_VRR)) {
+		(DSI_MODE_FLAG_SEAMLESS | DSI_MODE_FLAG_VRR |
+		 DSI_MODE_FLAG_DYN_CLK)) {
 		pr_debug("[%d] seamless pre-enable\n", c_bridge->id);
 		return;
 	}
@@ -279,6 +287,12 @@
 
 	memset(&(c_bridge->dsi_mode), 0x0, sizeof(struct dsi_display_mode));
 	convert_to_dsi_mode(adjusted_mode, &(c_bridge->dsi_mode));
+
+	/* restore bit_clk_rate also for dynamic clk use cases */
+	c_bridge->dsi_mode.timing.clk_rate_hz =
+		dsi_drm_find_bit_clk_rate(c_bridge->display, adjusted_mode);
+
+	pr_debug("clk_rate: %llu\n", c_bridge->dsi_mode.timing.clk_rate_hz);
 }
 
 static bool dsi_bridge_mode_fixup(struct drm_bridge *bridge,
@@ -337,17 +351,20 @@
 
 		convert_to_dsi_mode(&crtc_state->crtc->state->mode,
 							&cur_dsi_mode);
-		rc = dsi_display_validate_mode_vrr(c_bridge->display,
+		rc = dsi_display_validate_mode_change(c_bridge->display,
 					&cur_dsi_mode, &dsi_mode);
-		if (rc)
-			pr_debug("[%s] vrr mode mismatch failure rc=%d\n",
+		if (rc) {
+			pr_err("[%s] seamless mode mismatch failure rc=%d\n",
 				c_bridge->display->name, rc);
+			return false;
+		}
 
 		cur_mode = crtc_state->crtc->mode;
 
 		/* No DMS/VRR when drm pipeline is changing */
 		if (!drm_mode_equal(&cur_mode, adjusted_mode) &&
 			(!(dsi_mode.dsi_mode_flags & DSI_MODE_FLAG_VRR)) &&
+			(!(dsi_mode.dsi_mode_flags & DSI_MODE_FLAG_DYN_CLK)) &&
 			(!crtc_state->active_changed ||
 			 display->is_cont_splash_enabled))
 			dsi_mode.dsi_mode_flags |= DSI_MODE_FLAG_DMS;
@@ -359,6 +376,33 @@
 	return true;
 }
 
+u64 dsi_drm_find_bit_clk_rate(void *display,
+			      const struct drm_display_mode *drm_mode)
+{
+	int i = 0, count = 0;
+	struct dsi_display *dsi_display = display;
+	struct dsi_display_mode *dsi_mode;
+	u64 bit_clk_rate = 0;
+
+	if (!dsi_display || !drm_mode)
+		return 0;
+
+	dsi_display_get_mode_count(dsi_display, &count);
+
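+	/*
+	 * Walk the cached DSI mode list and match on resolution, pixel clock
+	 * and refresh rate to recover the bit clock chosen for this mode.
+	 */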
+	for (i = 0; i < count; i++) {
+		dsi_mode = &dsi_display->modes[i];
+		if ((dsi_mode->timing.v_active == drm_mode->vdisplay) &&
+		    (dsi_mode->timing.h_active == drm_mode->hdisplay) &&
+		    (dsi_mode->pixel_clk_khz == drm_mode->clock) &&
+		    (dsi_mode->timing.refresh_rate == drm_mode->vrefresh)) {
+			bit_clk_rate = dsi_mode->timing.clk_rate_hz;
+			break;
+		}
+	}
+
+	return bit_clk_rate;
+}
+
 int dsi_conn_get_mode_info(const struct drm_display_mode *drm_mode,
 	struct msm_mode_info *mode_info,
 	u32 max_mixer_width, void *display)
@@ -382,7 +426,7 @@
 	mode_info->prefill_lines = dsi_mode.priv_info->panel_prefill_lines;
 	mode_info->jitter_numer = dsi_mode.priv_info->panel_jitter_numer;
 	mode_info->jitter_denom = dsi_mode.priv_info->panel_jitter_denom;
-	mode_info->clk_rate = dsi_mode.priv_info->clk_rate_hz;
+	mode_info->clk_rate = dsi_drm_find_bit_clk_rate(display, drm_mode);
 
 	memcpy(&mode_info->topology, &dsi_mode.priv_info->topology,
 			sizeof(struct msm_display_topology));
@@ -507,6 +551,9 @@
 			panel->dfps_caps.max_refresh_rate);
 	}
 
+	sde_kms_info_add_keystr(info, "dyn bitclk support",
+			panel->dyn_clk_caps.dyn_clk_support ? "true" : "false");
+
 	switch (panel->phy_props.rotation) {
 	case DSI_PANEL_ROTATE_NONE:
 		sde_kms_info_add_keystr(info, "panel orientation", "none");
@@ -662,6 +709,9 @@
 		}
 		m->width_mm = connector->display_info.width_mm;
 		m->height_mm = connector->display_info.height_mm;
+		/* set the first mode in list as preferred */
+		if (i == 0)
+			m->type |= DRM_MODE_TYPE_PREFERRED;
 		drm_mode_probed_add(connector, m);
 	}
 end:
@@ -768,6 +818,9 @@
 		c_bridge->dsi_mode.dsi_mode_flags &= ~DSI_MODE_FLAG_VRR;
 	}
 
+	/* ensure dynamic clk switch flag is reset */
+	c_bridge->dsi_mode.dsi_mode_flags &= ~DSI_MODE_FLAG_DYN_CLK;
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h
index 2bad8c0..8d3e764 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h
@@ -150,4 +150,6 @@
 void dsi_convert_to_drm_mode(const struct dsi_display_mode *dsi_mode,
 				struct drm_display_mode *drm_mode);
 
+u64 dsi_drm_find_bit_clk_rate(void *display,
+			      const struct drm_display_mode *drm_mode);
 #endif /* _DSI_DRM_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_hw.h
index 174be9f..9ccff4b 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_hw.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_hw.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -45,4 +45,14 @@
 #define DSI_R64(dsi_hw, off) readq_relaxed((dsi_hw)->base + (off))
 #define DSI_W64(dsi_hw, off, val) writeq_relaxed((val), (dsi_hw)->base + (off))
 
+#define PLL_CALC_DATA(addr0, addr1, data0, data1)      \
+	(((data1) << 24) | ((((addr1)/4) & 0xFF) << 16) | \
+	 ((data0) << 8) | (((addr0)/4) & 0xFF))
+
+#define DSI_DYN_REF_REG_W(base, offset, addr0, addr1, data0, data1)   \
+	writel_relaxed(PLL_CALC_DATA(addr0, addr1, data0, data1), \
+			(base) + (offset))
+
+#define DSI_GEN_R32(base, offset) readl_relaxed(base + (offset))
+#define DSI_GEN_W32(base, offset, val) writel_relaxed((val), base + (offset))
 #endif /* _DSI_HW_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
index 61c3fcb..b43b23c 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
@@ -1130,6 +1130,46 @@
 	return rc;
 }
 
+static int dsi_panel_parse_dyn_clk_caps(struct dsi_dyn_clk_caps *dyn_clk_caps,
+				     struct device_node *of_node,
+				     const char *name)
+{
+	int rc = 0;
+	bool supported = false;
+
+	supported = of_property_read_bool(of_node, "qcom,dsi-dyn-clk-enable");
+
+	if (!supported) {
+		dyn_clk_caps->dyn_clk_support = false;
+		return rc;
+	}
+
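+	/*
+	 * of_find_property() returns the property length in bytes; convert it
+	 * to the number of u32 bit clock entries before validating the list.
+	 */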
+	of_find_property(of_node, "qcom,dsi-dyn-clk-list",
+			      &dyn_clk_caps->bit_clk_list_len);
+	dyn_clk_caps->bit_clk_list_len /= sizeof(u32);
+	if (dyn_clk_caps->bit_clk_list_len < 1) {
+		pr_err("[%s] failed to get supported bit clk list\n", name);
+		return -EINVAL;
+	}
+
+	dyn_clk_caps->bit_clk_list = kcalloc(dyn_clk_caps->bit_clk_list_len,
+					     sizeof(u32), GFP_KERNEL);
+	if (!dyn_clk_caps->bit_clk_list)
+		return -ENOMEM;
+
+	rc = of_property_read_u32_array(of_node, "qcom,dsi-dyn-clk-list",
+				   dyn_clk_caps->bit_clk_list,
+				   dyn_clk_caps->bit_clk_list_len);
+	if (rc) {
+		pr_err("[%s] failed to parse supported bit clk list\n", name);
+		return -EINVAL;
+	}
+
+	dyn_clk_caps->dyn_clk_support = true;
+
+	return 0;
+}
+
 static int dsi_panel_parse_dfps_caps(struct dsi_dfps_capabilities *dfps_caps,
 				     struct device_node *of_node,
 				     const char *name)
@@ -1137,7 +1177,7 @@
 	int rc = 0;
 	bool supported = false;
 	const char *type;
-	u32 val = 0;
+	u32 val = 0, i;
 
 	supported = of_property_read_bool(of_node,
 					"qcom,mdss-dsi-pan-enable-dynamic-fps");
@@ -1145,68 +1185,68 @@
 	if (!supported) {
 		pr_debug("[%s] DFPS is not supported\n", name);
 		dfps_caps->dfps_support = false;
-	} else {
-
-		type = of_get_property(of_node,
-				       "qcom,mdss-dsi-pan-fps-update",
-				       NULL);
-		if (!type) {
-			pr_err("[%s] dfps type not defined\n", name);
-			rc = -EINVAL;
-			goto error;
-		} else if (!strcmp(type, "dfps_suspend_resume_mode")) {
-			dfps_caps->type = DSI_DFPS_SUSPEND_RESUME;
-		} else if (!strcmp(type, "dfps_immediate_clk_mode")) {
-			dfps_caps->type = DSI_DFPS_IMMEDIATE_CLK;
-		} else if (!strcmp(type, "dfps_immediate_porch_mode_hfp")) {
-			dfps_caps->type = DSI_DFPS_IMMEDIATE_HFP;
-		} else if (!strcmp(type, "dfps_immediate_porch_mode_vfp")) {
-			dfps_caps->type = DSI_DFPS_IMMEDIATE_VFP;
-		} else {
-			pr_err("[%s] dfps type is not recognized\n", name);
-			rc = -EINVAL;
-			goto error;
-		}
-
-		rc = of_property_read_u32(of_node,
-					  "qcom,mdss-dsi-min-refresh-rate",
-					  &val);
-		if (rc) {
-			pr_err("[%s] Min refresh rate is not defined\n", name);
-			rc = -EINVAL;
-			goto error;
-		}
-		dfps_caps->min_refresh_rate = val;
-
-		rc = of_property_read_u32(of_node,
-					  "qcom,mdss-dsi-max-refresh-rate",
-					  &val);
-		if (rc) {
-			pr_debug("[%s] Using default refresh rate\n", name);
-			rc = of_property_read_u32(of_node,
-						"qcom,mdss-dsi-panel-framerate",
-						&val);
-			if (rc) {
-				pr_err("[%s] max refresh rate is not defined\n",
-				       name);
-				rc = -EINVAL;
-				goto error;
-			}
-		}
-		dfps_caps->max_refresh_rate = val;
-
-		if (dfps_caps->min_refresh_rate > dfps_caps->max_refresh_rate) {
-			pr_err("[%s] min rate > max rate\n", name);
-			rc = -EINVAL;
-		}
-
-		pr_debug("[%s] DFPS is supported %d-%d, mode %d\n", name,
-				dfps_caps->min_refresh_rate,
-				dfps_caps->max_refresh_rate,
-				dfps_caps->type);
-		dfps_caps->dfps_support = true;
+		return rc;
 	}
 
+	type = of_get_property(of_node,
+			       "qcom,mdss-dsi-pan-fps-update",
+			       NULL);
+	if (!type) {
+		pr_err("[%s] dfps type not defined\n", name);
+		rc = -EINVAL;
+		goto error;
+	} else if (!strcmp(type, "dfps_suspend_resume_mode")) {
+		dfps_caps->type = DSI_DFPS_SUSPEND_RESUME;
+	} else if (!strcmp(type, "dfps_immediate_clk_mode")) {
+		dfps_caps->type = DSI_DFPS_IMMEDIATE_CLK;
+	} else if (!strcmp(type, "dfps_immediate_porch_mode_hfp")) {
+		dfps_caps->type = DSI_DFPS_IMMEDIATE_HFP;
+	} else if (!strcmp(type, "dfps_immediate_porch_mode_vfp")) {
+		dfps_caps->type = DSI_DFPS_IMMEDIATE_VFP;
+	} else {
+		pr_err("[%s] dfps type is not recognized\n", name);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	of_find_property(of_node, "qcom,dsi-supported-dfps-list",
+			 &dfps_caps->dfps_list_len);
+	dfps_caps->dfps_list_len /= sizeof(u32);
+	if (dfps_caps->dfps_list_len < 1) {
+		pr_err("[%s] dfps refresh list not present\n", name);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	dfps_caps->dfps_list = kcalloc(dfps_caps->dfps_list_len, sizeof(u32),
+				       GFP_KERNEL);
+	if (!dfps_caps->dfps_list) {
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	rc = of_property_read_u32_array(of_node, "qcom,dsi-supported-dfps-list",
+					dfps_caps->dfps_list,
+					dfps_caps->dfps_list_len);
+	if (rc) {
+		pr_err("[%s] dfps refresh rate list parse failed\n", name);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	dfps_caps->dfps_support = true;
+
+	/* calculate max and min fps */
+	of_property_read_u32(of_node, "qcom,mdss-dsi-panel-framerate", &val);
+	dfps_caps->max_refresh_rate = val;
+	dfps_caps->min_refresh_rate = val;
+
+	for (i = 0; i < dfps_caps->dfps_list_len; i++) {
+		if (dfps_caps->dfps_list[i] < dfps_caps->min_refresh_rate)
+			dfps_caps->min_refresh_rate = dfps_caps->dfps_list[i];
+		else if (dfps_caps->dfps_list[i] > dfps_caps->max_refresh_rate)
+			dfps_caps->max_refresh_rate = dfps_caps->dfps_list[i];
+	}
 error:
 	return rc;
 }
@@ -2930,6 +2970,14 @@
 			pr_err("failed to parse dfps configuration, rc=%d\n",
 				rc);
 
+		if (panel->panel_mode == DSI_OP_VIDEO_MODE) {
+			rc = dsi_panel_parse_dyn_clk_caps(&panel->dyn_clk_caps,
+				of_node, panel->name);
+			if (rc)
+				pr_err("failed to parse dynamic clk config, rc=%d\n",
+				       rc);
+		}
+
 		rc = dsi_panel_parse_phy_props(&panel->phy_props,
 			of_node, panel->name);
 		if (rc) {
@@ -3329,7 +3377,7 @@
 	if (mode->priv_info) {
 		config->video_timing.dsc_enabled = mode->priv_info->dsc_enabled;
 		config->video_timing.dsc = &mode->priv_info->dsc;
-		config->bit_clk_rate_hz = mode->priv_info->clk_rate_hz;
+		config->bit_clk_rate_hz = mode->timing.clk_rate_hz;
 	}
 	config->esc_clk_rate_hz = 19200000;
 	mutex_unlock(&panel->panel_lock);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
index fef7d3f..ab8ccee 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
@@ -70,10 +70,18 @@
 };
 
 struct dsi_dfps_capabilities {
-	bool dfps_support;
 	enum dsi_dfps_type type;
 	u32 min_refresh_rate;
 	u32 max_refresh_rate;
+	u32 *dfps_list;
+	u32 dfps_list_len;
+	bool dfps_support;
+};
+
+struct dsi_dyn_clk_caps {
+	bool dyn_clk_support;
+	u32 *bit_clk_list;
+	u32 bit_clk_list_len;
 };
 
 struct dsi_pinctrl_info {
@@ -170,6 +178,7 @@
 	enum dsi_op_mode panel_mode;
 
 	struct dsi_dfps_capabilities dfps_caps;
+	struct dsi_dyn_clk_caps dyn_clk_caps;
 	struct dsi_panel_phy_props phy_props;
 
 	struct dsi_display_mode *cur_mode;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
index 3d6711f..ebc699a 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
@@ -107,6 +107,9 @@
 
 	phy->hw.base = ptr;
 
+	ptr = msm_ioremap(pdev, "dyn_refresh_base", phy->name);
+	phy->hw.dyn_pll_base = ptr;
+
 	pr_debug("[%s] map dsi_phy registers to %pK\n",
 		phy->name, phy->hw.base);
 
@@ -616,11 +619,8 @@
 		return -EINVAL;
 	}
 
-	mutex_lock(&dsi_phy->phy_lock);
-
 	pr_debug("[PHY_%d] Skipping validation\n", dsi_phy->index);
 
-	mutex_unlock(&dsi_phy->phy_lock);
 	return rc;
 }
 
@@ -848,7 +848,7 @@
 		rc = phy->hw.ops.calculate_timing_params(&phy->hw,
 						 &phy->mode,
 						 &config->common_config,
-						 &phy->cfg.timing);
+						 &phy->cfg.timing, false);
 	if (rc) {
 		pr_err("[%s] failed to set timing, rc=%d\n", phy->name, rc);
 		goto error;
@@ -866,6 +866,27 @@
 	return rc;
 }
 
+/* update dsi phy timings for dynamic clk switch use case */
+int dsi_phy_update_phy_timings(struct msm_dsi_phy *phy,
+			       struct dsi_host_config *config)
+{
+	int rc = 0;
+
+	if (!phy || !config) {
+		pr_err("invalid argument\n");
+		return -EINVAL;
+	}
+
+	memcpy(&phy->mode, &config->video_timing, sizeof(phy->mode));
+	rc = phy->hw.ops.calculate_timing_params(&phy->hw, &phy->mode,
+						 &config->common_config,
+						 &phy->cfg.timing, true);
+	if (rc)
+		pr_err("failed to calculate phy timings %d\n", rc);
+
+	return rc;
+}
+
 int dsi_phy_lane_reset(struct msm_dsi_phy *phy)
 {
 	int ret = 0;
@@ -1030,10 +1051,111 @@
 		rc = phy->hw.ops.phy_timing_val(&phy->cfg.timing, timing, size);
 	if (!rc)
 		phy->cfg.is_phy_timing_present = true;
+
 	mutex_unlock(&phy->phy_lock);
 	return rc;
 }
 
+/**
+ * dsi_phy_dynamic_refresh_trigger() - trigger dynamic refresh
+ * @phy:	DSI PHY handle
+ * @is_master:	Boolean to indicate if for master or slave.
+ */
+void dsi_phy_dynamic_refresh_trigger(struct msm_dsi_phy *phy, bool is_master)
+{
+	u32 off;
+
+	if (!phy)
+		return;
+
+	mutex_lock(&phy->phy_lock);
+	/*
+	 * program the INTF_SEL, SWI_CTRL and SW_TRIGGER bits only for the
+	 * master and the SYNC_MODE and SWI_CTRL bits only for the slave.
+	 */
+	if (is_master)
+		off = BIT(DYN_REFRESH_INTF_SEL) | BIT(DYN_REFRESH_SWI_CTRL) |
+			BIT(DYN_REFRESH_SW_TRIGGER);
+	else
+		off = BIT(DYN_REFRESH_SYNC_MODE) | BIT(DYN_REFRESH_SWI_CTRL);
+
+	if (phy->hw.ops.dyn_refresh_ops.dyn_refresh_helper)
+		phy->hw.ops.dyn_refresh_ops.dyn_refresh_helper(&phy->hw, off);
+
+	mutex_unlock(&phy->phy_lock);
+}
+
+/**
+ * dsi_phy_config_dynamic_refresh() - Configure dynamic refresh registers
+ * @phy:	DSI PHY handle
+ * @delay:	pipe delays for dynamic refresh
+ * @is_master:	Boolean to indicate if for master or slave.
+ */
+void dsi_phy_config_dynamic_refresh(struct msm_dsi_phy *phy,
+				    struct dsi_dyn_clk_delay *delay,
+				    bool is_master)
+{
+	struct dsi_phy_cfg *cfg;
+
+	if (!phy)
+		return;
+
+	mutex_lock(&phy->phy_lock);
+
+	cfg = &phy->cfg;
+
+	if (phy->hw.ops.dyn_refresh_ops.dyn_refresh_config)
+		phy->hw.ops.dyn_refresh_ops.dyn_refresh_config(&phy->hw, cfg,
+							       is_master);
+	if (phy->hw.ops.dyn_refresh_ops.dyn_refresh_pipe_delay)
+		phy->hw.ops.dyn_refresh_ops.dyn_refresh_pipe_delay(
+						&phy->hw, delay);
+
+	mutex_unlock(&phy->phy_lock);
+}
+
+/**
+ * dsi_phy_dyn_refresh_cache_phy_timings() - cache the phy timings calculated
+ *				as part of dynamic refresh.
+ * @phy:	   DSI PHY Handle.
+ * @dst:	   Pointer to cache location.
+ * @size:	   Number of phy lane settings.
+ */
+int dsi_phy_dyn_refresh_cache_phy_timings(struct msm_dsi_phy *phy, u32 *dst,
+					  u32 size)
+{
+	int rc = 0;
+
+	if (!phy || !dst || !size)
+		return -EINVAL;
+
+	if (phy->hw.ops.dyn_refresh_ops.cache_phy_timings)
+		rc = phy->hw.ops.dyn_refresh_ops.cache_phy_timings(
+					   &phy->cfg.timing, dst, size);
+
+	if (rc)
+		pr_err("failed to cache phy timings %d\n", rc);
+
+	return rc;
+}
+
+/**
+ * dsi_phy_dynamic_refresh_clear() - clear dynamic refresh config
+ * @phy:	DSI PHY handle
+ */
+void dsi_phy_dynamic_refresh_clear(struct msm_dsi_phy *phy)
+{
+	if (!phy)
+		return;
+
+	mutex_lock(&phy->phy_lock);
+
+	if (phy->hw.ops.dyn_refresh_ops.dyn_refresh_helper)
+		phy->hw.ops.dyn_refresh_ops.dyn_refresh_helper(&phy->hw, 0);
+
+	mutex_unlock(&phy->phy_lock);
+}
+
 void dsi_phy_drv_register(void)
 {
 	platform_driver_register(&dsi_phy_platform_driver);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h
index 4163411..65c7a16 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h
@@ -278,4 +278,45 @@
  */
 void dsi_phy_drv_unregister(void);
 
+/**
+ * dsi_phy_update_phy_timings() - Update dsi phy timings
+ * @phy:	DSI PHY handle
+ * @config:	DSI Host config parameters
+ *
+ * Return: error code.
+ */
+int dsi_phy_update_phy_timings(struct msm_dsi_phy *phy,
+			       struct dsi_host_config *config);
+
+/**
+ * dsi_phy_config_dynamic_refresh() - Configure dynamic refresh registers
+ * @phy:	DSI PHY handle
+ * @delay:	pipe delays for dynamic refresh
+ * @is_master:	Boolean to indicate if for master or slave
+ */
+void dsi_phy_config_dynamic_refresh(struct msm_dsi_phy *phy,
+				    struct dsi_dyn_clk_delay *delay,
+				    bool is_master);
+/**
+ * dsi_phy_dynamic_refresh_trigger() - trigger dynamic refresh
+ * @phy:	DSI PHY handle
+ * @is_master:	Boolean to indicate if for master or slave.
+ */
+void dsi_phy_dynamic_refresh_trigger(struct msm_dsi_phy *phy, bool is_master);
+
+/**
+ * dsi_phy_dynamic_refresh_clear() - clear dynamic refresh config
+ * @phy:	DSI PHY handle
+ */
+void dsi_phy_dynamic_refresh_clear(struct msm_dsi_phy *phy);
+
+/**
+ * dsi_phy_dyn_refresh_cache_phy_timings - cache the phy timings calculated
+ *				as part of dynamic refresh.
+ * @phy:	   DSI PHY Handle.
+ * @dst:	   Pointer to cache location.
+ * @size:	   Number of phy lane settings.
+ */
+int dsi_phy_dyn_refresh_cache_phy_timings(struct msm_dsi_phy *phy,
+					  u32 *dst, u32 size);
 #endif /* _DSI_PHY_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h
index d24a613..67a1157 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h
@@ -159,6 +159,43 @@
 	bool (*is_lanes_in_ulps)(u32 ulps, u32 ulps_lanes);
 };
 
+struct phy_dyn_refresh_ops {
+	/**
+	 * dyn_refresh_helper - helper function to configure dynamic refresh
+	 *			control bits
+	 * @phy:           Pointer to DSI PHY hardware instance.
+	 * @offset:        Bitmask of dynamic refresh bits to program; 0 clears
+	 *                 the configuration.
+	 */
+	void (*dyn_refresh_helper)(struct dsi_phy_hw *phy, u32 offset);
+
+	/**
+	 * dyn_refresh_config - configure dynamic refresh ctrl registers
+	 * @phy:           Pointer to DSI PHY hardware instance.
+	 * @cfg:	   Pointer to DSI PHY timings.
+	 * @is_master:	   Boolean to indicate whether for master or slave.
+	 */
+	void (*dyn_refresh_config)(struct dsi_phy_hw *phy,
+				   struct dsi_phy_cfg *cfg, bool is_master);
+
+	/**
+	 * dyn_refresh_pipe_delay - configure pipe delay registers for dynamic
+	 *				refresh.
+	 * @phy:           Pointer to DSI PHY hardware instance.
+	 * @delay:	   structure containing all the delays to be programmed.
+	 */
+	void (*dyn_refresh_pipe_delay)(struct dsi_phy_hw *phy,
+				      struct dsi_dyn_clk_delay *delay);
+
+	/**
+	 * cache_phy_timings - cache the phy timings calculated as part of
+	 *				dynamic refresh.
+	 * @timings:       Pointer to calculated phy timing parameters.
+	 * @dst:	   Pointer to cache location.
+	 * @size:	   Number of phy lane settings.
+	 */
+	int (*cache_phy_timings)(struct dsi_phy_per_lane_cfgs *timings,
+				  u32 *dst, u32 size);
+};
+
 /**
  * struct dsi_phy_hw_ops - Operations for DSI PHY hardware.
  * @regulator_enable:          Enable PHY regulators.
@@ -218,11 +255,14 @@
 	 * @mode:     Mode information for which timing has to be calculated.
 	 * @config:   DSI host configuration for this mode.
 	 * @timing:   Timing parameters for each lane which will be returned.
+	 * @use_mode_bit_clk: Boolean to indicate whether to recalculate the
+	 *		dsi bit clk or use the existing bit clk (for the
+	 *		dynamic clk case).
 	 */
 	int (*calculate_timing_params)(struct dsi_phy_hw *phy,
 				       struct dsi_mode_info *mode,
 				       struct dsi_host_common_cfg *config,
-				       struct dsi_phy_per_lane_cfgs *timing);
+				       struct dsi_phy_per_lane_cfgs *timing,
+				       bool use_mode_bit_clk);
 
 	/**
 	 * phy_timing_val() - Gets PHY timing values.
@@ -257,12 +297,15 @@
 
 	void *timing_ops;
 	struct phy_ulps_config_ops ulps_ops;
+	struct phy_dyn_refresh_ops dyn_refresh_ops;
 };
 
 /**
  * struct dsi_phy_hw - DSI phy hardware object specific to an instance
  * @base:                  VA for the DSI PHY base address.
  * @length:                Length of the DSI PHY register base map.
+ * @dyn_pll_base:      VA for the DSI dynamic refresh base address.
+ * @dyn_refresh_len:       Length of the DSI dynamic refresh register base map.
  * @index:                 Instance ID of the controller.
  * @version:               DSI PHY version.
  * @feature_map:           Features supported by DSI PHY.
@@ -271,6 +314,8 @@
 struct dsi_phy_hw {
 	void __iomem *base;
 	u32 length;
+	void __iomem *dyn_pll_base;
+	u32 dyn_refresh_len;
 	u32 index;
 
 	enum dsi_phy_version version;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v3_0.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v3_0.c
index 1d4f2ab..6c6286d 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v3_0.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v3_0.c
@@ -51,7 +51,6 @@
 #define DSIPHY_CMN_LANE_STATUS0						0x0F4
 #define DSIPHY_CMN_LANE_STATUS1						0x0F8
 
-
 /* n = 0..3 for data lanes and n = 4 for clock lane */
 #define DSIPHY_LNX_CFG0(n)                         (0x200 + (0x80 * (n)))
 #define DSIPHY_LNX_CFG1(n)                         (0x204 + (0x80 * (n)))
@@ -66,6 +65,47 @@
 #define DSIPHY_LNX_LPRX_CTRL(n)                    (0x228 + (0x80 * (n)))
 #define DSIPHY_LNX_TX_DCTRL(n)                     (0x22C + (0x80 * (n)))
 
+/* dynamic refresh control registers */
+#define DSI_DYN_REFRESH_CTRL                   (0x000)
+#define DSI_DYN_REFRESH_PIPE_DELAY             (0x004)
+#define DSI_DYN_REFRESH_PIPE_DELAY2            (0x008)
+#define DSI_DYN_REFRESH_PLL_DELAY              (0x00C)
+#define DSI_DYN_REFRESH_STATUS                 (0x010)
+#define DSI_DYN_REFRESH_PLL_CTRL0              (0x014)
+#define DSI_DYN_REFRESH_PLL_CTRL1              (0x018)
+#define DSI_DYN_REFRESH_PLL_CTRL2              (0x01C)
+#define DSI_DYN_REFRESH_PLL_CTRL3              (0x020)
+#define DSI_DYN_REFRESH_PLL_CTRL4              (0x024)
+#define DSI_DYN_REFRESH_PLL_CTRL5              (0x028)
+#define DSI_DYN_REFRESH_PLL_CTRL6              (0x02C)
+#define DSI_DYN_REFRESH_PLL_CTRL7              (0x030)
+#define DSI_DYN_REFRESH_PLL_CTRL8              (0x034)
+#define DSI_DYN_REFRESH_PLL_CTRL9              (0x038)
+#define DSI_DYN_REFRESH_PLL_CTRL10             (0x03C)
+#define DSI_DYN_REFRESH_PLL_CTRL11             (0x040)
+#define DSI_DYN_REFRESH_PLL_CTRL12             (0x044)
+#define DSI_DYN_REFRESH_PLL_CTRL13             (0x048)
+#define DSI_DYN_REFRESH_PLL_CTRL14             (0x04C)
+#define DSI_DYN_REFRESH_PLL_CTRL15             (0x050)
+#define DSI_DYN_REFRESH_PLL_CTRL16             (0x054)
+#define DSI_DYN_REFRESH_PLL_CTRL17             (0x058)
+#define DSI_DYN_REFRESH_PLL_CTRL18             (0x05C)
+#define DSI_DYN_REFRESH_PLL_CTRL19             (0x060)
+#define DSI_DYN_REFRESH_PLL_CTRL20             (0x064)
+#define DSI_DYN_REFRESH_PLL_CTRL21             (0x068)
+#define DSI_DYN_REFRESH_PLL_CTRL22             (0x06C)
+#define DSI_DYN_REFRESH_PLL_CTRL23             (0x070)
+#define DSI_DYN_REFRESH_PLL_CTRL24             (0x074)
+#define DSI_DYN_REFRESH_PLL_CTRL25             (0x078)
+#define DSI_DYN_REFRESH_PLL_CTRL26             (0x07C)
+#define DSI_DYN_REFRESH_PLL_CTRL27             (0x080)
+#define DSI_DYN_REFRESH_PLL_CTRL28             (0x084)
+#define DSI_DYN_REFRESH_PLL_CTRL29             (0x088)
+#define DSI_DYN_REFRESH_PLL_CTRL30             (0x08C)
+#define DSI_DYN_REFRESH_PLL_CTRL31             (0x090)
+#define DSI_DYN_REFRESH_PLL_UPPER_ADDR         (0x094)
+#define DSI_DYN_REFRESH_PLL_UPPER_ADDR2        (0x098)
+
 static inline int dsi_conv_phy_to_logical_lane(
 	struct dsi_lane_map *lane_map, enum dsi_phy_data_lanes phy_lane)
 {
@@ -500,3 +540,163 @@
 		timing_cfg->lane_v3[i] = timing_val[i];
 	return 0;
 }
+
+void dsi_phy_hw_v3_0_dyn_refresh_config(struct dsi_phy_hw *phy,
+					struct dsi_phy_cfg *cfg, bool is_master)
+{
+	u32 reg;
+
+	if (is_master) {
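+		/*
+		 * Load the global/vreg controls, the twelve PHY timing
+		 * registers and the lane enables into the dynamic refresh
+		 * PLL_CTRL slots; they are applied to the PHY when the
+		 * refresh is triggered.
+		 */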
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL9,
+			  DSIPHY_CMN_GLBL_CTRL, DSIPHY_CMN_VREG_CTRL,
+			  0x10, 0x59);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL10,
+			  DSIPHY_CMN_TIMING_CTRL_0, DSIPHY_CMN_TIMING_CTRL_1,
+			  cfg->timing.lane_v3[0], cfg->timing.lane_v3[1]);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL11,
+			  DSIPHY_CMN_TIMING_CTRL_2, DSIPHY_CMN_TIMING_CTRL_3,
+			  cfg->timing.lane_v3[2], cfg->timing.lane_v3[3]);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL12,
+			  DSIPHY_CMN_TIMING_CTRL_4, DSIPHY_CMN_TIMING_CTRL_5,
+			  cfg->timing.lane_v3[4], cfg->timing.lane_v3[5]);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL13,
+			  DSIPHY_CMN_TIMING_CTRL_6, DSIPHY_CMN_TIMING_CTRL_7,
+			  cfg->timing.lane_v3[6], cfg->timing.lane_v3[7]);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL14,
+			  DSIPHY_CMN_TIMING_CTRL_8, DSIPHY_CMN_TIMING_CTRL_9,
+			  cfg->timing.lane_v3[8], cfg->timing.lane_v3[9]);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL15,
+			  DSIPHY_CMN_TIMING_CTRL_10, DSIPHY_CMN_TIMING_CTRL_11,
+			  cfg->timing.lane_v3[10], cfg->timing.lane_v3[11]);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL16,
+			  DSIPHY_CMN_CTRL_0, DSIPHY_CMN_LANE_CTRL0,
+			  0x7f, 0x1f);
+	} else {
+		reg = DSI_R32(phy, DSIPHY_CMN_CLK_CFG0);
+		reg &= ~BIT(5);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL0,
+			  DSIPHY_CMN_CLK_CFG0, DSIPHY_CMN_PLL_CNTRL,
+			  reg, 0x0);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL1,
+			  DSIPHY_CMN_RBUF_CTRL, DSIPHY_CMN_GLBL_CTRL,
+			  0x0, 0x10);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL2,
+			  DSIPHY_CMN_VREG_CTRL, DSIPHY_CMN_TIMING_CTRL_0,
+			  0x59, cfg->timing.lane_v3[0]);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL3,
+			  DSIPHY_CMN_TIMING_CTRL_1, DSIPHY_CMN_TIMING_CTRL_2,
+			  cfg->timing.lane_v3[1], cfg->timing.lane_v3[2]);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL4,
+			  DSIPHY_CMN_TIMING_CTRL_3, DSIPHY_CMN_TIMING_CTRL_4,
+			  cfg->timing.lane_v3[3], cfg->timing.lane_v3[4]);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL5,
+			  DSIPHY_CMN_TIMING_CTRL_5, DSIPHY_CMN_TIMING_CTRL_6,
+			  cfg->timing.lane_v3[5], cfg->timing.lane_v3[6]);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL6,
+			  DSIPHY_CMN_TIMING_CTRL_7, DSIPHY_CMN_TIMING_CTRL_8,
+			  cfg->timing.lane_v3[7], cfg->timing.lane_v3[8]);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL7,
+			  DSIPHY_CMN_TIMING_CTRL_9, DSIPHY_CMN_TIMING_CTRL_10,
+			  cfg->timing.lane_v3[9], cfg->timing.lane_v3[10]);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL8,
+			  DSIPHY_CMN_TIMING_CTRL_11, DSIPHY_CMN_CTRL_0,
+			  cfg->timing.lane_v3[11], 0x7f);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL9,
+			  DSIPHY_CMN_LANE_CTRL0, DSIPHY_CMN_CTRL_2,
+			  0x1f, 0x40);
+		/*
+		 * fill with dummy register writes since the controller will
+		 * blindly send these values to the DSI PHY.
+		 */
+		reg = DSI_DYN_REFRESH_PLL_CTRL11;
+		while (reg <= DSI_DYN_REFRESH_PLL_CTRL29) {
+			DSI_DYN_REF_REG_W(phy->dyn_pll_base, reg,
+				  DSIPHY_CMN_LANE_CTRL0, DSIPHY_CMN_CTRL_0,
+				  0x1f, 0x7f);
+			reg += 0x4;
+		}
+
+		DSI_GEN_W32(phy->dyn_pll_base,
+			    DSI_DYN_REFRESH_PLL_UPPER_ADDR, 0);
+		DSI_GEN_W32(phy->dyn_pll_base,
+			    DSI_DYN_REFRESH_PLL_UPPER_ADDR2, 0);
+	}
+
+	wmb(); /* make sure all registers are updated */
+}
+
+void dsi_phy_hw_v3_0_dyn_refresh_pipe_delay(struct dsi_phy_hw *phy,
+					    struct dsi_dyn_clk_delay *delay)
+{
+	if (!delay)
+		return;
+
+	DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_PIPE_DELAY,
+		    delay->pipe_delay);
+	DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_PIPE_DELAY2,
+		    delay->pipe_delay2);
+	DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_DELAY,
+		    delay->pll_delay);
+}
+
+void dsi_phy_hw_v3_0_dyn_refresh_helper(struct dsi_phy_hw *phy, u32 offset)
+{
+	u32 reg;
+
+	/*
+	 * if no offset is mentioned then this means we want to clear
+	 * the dynamic refresh ctrl register which is the last step
+	 * of dynamic refresh sequence.
+	 */
+	if (!offset) {
+		reg = DSI_GEN_R32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL);
+		reg &= ~(BIT(0) | BIT(8));
+		DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL, reg);
+		wmb(); /* ensure dynamic fps is cleared */
+		return;
+	}
+
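+	/*
+	 * Otherwise translate each requested dynamic refresh flag into its
+	 * DYN_REFRESH_CTRL register bit, ending with the SW trigger.
+	 */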
+	if (offset & BIT(DYN_REFRESH_INTF_SEL)) {
+		reg = DSI_GEN_R32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL);
+		reg |= BIT(13);
+		DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL, reg);
+	}
+
+	if (offset & BIT(DYN_REFRESH_SYNC_MODE)) {
+		reg = DSI_GEN_R32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL);
+		reg |= BIT(16);
+		DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL, reg);
+	}
+
+	if (offset & BIT(DYN_REFRESH_SWI_CTRL)) {
+		reg = DSI_GEN_R32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL);
+		reg |= BIT(0);
+		DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL, reg);
+	}
+
+	if (offset & BIT(DYN_REFRESH_SW_TRIGGER)) {
+		reg = DSI_GEN_R32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL);
+		reg |= BIT(8);
+		DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL, reg);
+		wmb(); /* ensure dynamic fps is triggered */
+	}
+}
+
+int dsi_phy_hw_v3_0_cache_phy_timings(struct dsi_phy_per_lane_cfgs *timings,
+				      u32 *dst, u32 size)
+{
+	int i;
+
+	if (!timings || !dst || !size)
+		return -EINVAL;
+
+	if (size != DSI_PHY_TIMING_V3_SIZE) {
+		pr_err("size mismatch\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < size; i++)
+		dst[i] = timings->lane_v3[i];
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_timing_calc.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_timing_calc.c
index fdfaa5d..44d0928 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_timing_calc.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_timing_calc.c
@@ -511,11 +511,14 @@
  * @mode:     Mode information for which timing has to be calculated.
  * @config:   DSI host configuration for this mode.
  * @timing:   Timing parameters for each lane which will be returned.
+ * @use_mode_bit_clk: Boolean to indicate whether to recalculate the dsi
+ *		bit clk or use the existing bit clk (for the dynamic clk case).
  */
 int dsi_phy_hw_calculate_timing_params(struct dsi_phy_hw *phy,
-					    struct dsi_mode_info *mode,
-					    struct dsi_host_common_cfg *host,
-					   struct dsi_phy_per_lane_cfgs *timing)
+				       struct dsi_mode_info *mode,
+				       struct dsi_host_common_cfg *host,
+				       struct dsi_phy_per_lane_cfgs *timing,
+				       bool use_mode_bit_clk)
 {
 	/* constants */
 	u32 const esc_clk_mhz = 192; /* TODO: esc clock is hardcoded */
@@ -541,7 +544,7 @@
 	struct phy_timing_ops *ops = phy->ops.timing_ops;
 
 	memset(&desc, 0x0, sizeof(desc));
-	h_total = DSI_H_TOTAL(mode);
+	h_total = DSI_H_TOTAL_DSC(mode);
 	v_total = DSI_V_TOTAL(mode);
 
 	bpp = bits_per_pixel[host->dst_format];
@@ -558,7 +561,10 @@
 		num_of_lanes++;
 
 
-	x = mult_frac(v_total * h_total, inter_num, num_of_lanes);
+	if (use_mode_bit_clk)
+		x = mode->clk_rate_hz;
+	else
+		x = mult_frac(v_total * h_total, inter_num, num_of_lanes);
 	y = rounddown(x, 1);
 
 	clk_params.bitclk_mbps = rounddown(DIV_ROUND_UP_ULL(y, 1000000), 1);
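
The hunk above makes dsi_phy_hw_calculate_timing_params either reuse the mode's bit clock (the dynamic clock case) or derive one from the timing totals before reducing it to whole Mbps. A small userspace C sketch of that selection follows; pick_bitclk_mbps, per_pixel_bits, and the sample numbers are illustrative assumptions rather than the driver's exact formula.

#include <stdint.h>
#include <stdio.h>

/* Round Hz up to whole Mbps, in the spirit of DIV_ROUND_UP_ULL(y, 1000000). */
static uint32_t hz_to_mbps(uint64_t hz)
{
	return (uint32_t)((hz + 1000000 - 1) / 1000000);
}

/* Either reuse the mode's bit clock (dynamic clock switch) or recompute it
 * from the frame geometry spread over the active lanes. */
static uint32_t pick_bitclk_mbps(int use_mode_bit_clk, uint64_t mode_clk_hz,
				 uint64_t h_total, uint64_t v_total,
				 uint64_t per_pixel_bits, uint32_t num_lanes)
{
	uint64_t hz;

	if (use_mode_bit_clk)
		hz = mode_clk_hz;
	else
		hz = (v_total * h_total * per_pixel_bits) / num_lanes;

	return hz_to_mbps(hz);
}

int main(void)
{
	/* 24 bpp at 60 fps over 4 lanes vs. a fixed 798.24 MHz bit clock. */
	printf("%u Mbps (recomputed)\n",
	       pick_bitclk_mbps(0, 0, 2250, 1111, 24 * 60, 4));
	printf("%u Mbps (mode clk reused)\n",
	       pick_bitclk_mbps(1, 798240576ull, 0, 0, 0, 4));
	return 0;
}
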
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 20cae2e..a4d71f7 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -124,7 +124,8 @@
 	int conn_cnt = 0;
 
 	if (msm_is_mode_seamless(&crtc_state->mode) ||
-		msm_is_mode_seamless_vrr(&crtc_state->adjusted_mode))
+		msm_is_mode_seamless_vrr(&crtc_state->adjusted_mode) ||
+		msm_is_mode_seamless_dyn_clk(&crtc_state->adjusted_mode))
 		return true;
 
 	if (msm_is_mode_seamless_dms(&crtc_state->adjusted_mode) && !enable)
@@ -168,6 +169,10 @@
 			&connector->encoder->crtc->state->adjusted_mode))
 		return true;
 
+	if (msm_is_mode_seamless_dyn_clk(
+			 &connector->encoder->crtc->state->adjusted_mode))
+		return true;
+
 	if (msm_is_mode_seamless_dms(
 			&connector->encoder->crtc->state->adjusted_mode))
 		return true;
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index e99ff9c..f5f6853 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -38,6 +38,8 @@
 #define MSM_MODE_FLAG_SEAMLESS_DMS			(1<<2)
 /* Request to switch the fps */
 #define MSM_MODE_FLAG_SEAMLESS_VRR			(1<<3)
+/* Request to switch the bit clk */
+#define MSM_MODE_FLAG_SEAMLESS_DYN_CLK			(1<<4)
 
 /* As there are different display controller blocks depending on the
  * snapdragon version, the kms support is split out and the appropriate
@@ -175,6 +177,13 @@
 		: false;
 }
 
+static inline bool msm_is_mode_seamless_dyn_clk(
+					const struct drm_display_mode *mode)
+{
+	return mode ? (mode->private_flags & MSM_MODE_FLAG_SEAMLESS_DYN_CLK)
+		: false;
+}
+
 static inline bool msm_needs_vblank_pre_modeset(
 		const struct drm_display_mode *mode)
 {
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index 04f16ca..85b9f7e 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -1949,6 +1949,9 @@
 
 		sde_kms_info_add_keystr(info, "mode_name", mode->name);
 
+		sde_kms_info_add_keyint(info, "bit_clk_rate",
+					mode_info.clk_rate);
+
 		topology_idx = (int)sde_rm_get_topology_name(
 							mode_info.topology);
 		if (topology_idx < SDE_RM_TOPOLOGY_MAX) {
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index 73829da..e0094d7 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -748,8 +748,9 @@
 	SDE_DEBUG("\n");
 
 	if ((msm_is_mode_seamless(adjusted_mode) ||
-			msm_is_mode_seamless_vrr(adjusted_mode)) &&
-		(!crtc->enabled)) {
+	     (msm_is_mode_seamless_vrr(adjusted_mode) ||
+	      msm_is_mode_seamless_dyn_clk(adjusted_mode))) &&
+	    (!crtc->enabled)) {
 		SDE_ERROR("crtc state prevents seamless transition\n");
 		return false;
 	}
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 56c288f..5bfae1f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -271,12 +271,16 @@
 		nv_connector->edid = NULL;
 	}
 
-	/* Outputs are only polled while runtime active, so acquiring a
-	 * runtime PM ref here is unnecessary (and would deadlock upon
-	 * runtime suspend because it waits for polling to finish).
+	/* Outputs are only polled while runtime active, so resuming the
+	 * device here is unnecessary (and would deadlock upon runtime suspend
+	 * because it waits for polling to finish). We do, however, want to
+	 * prevent the autosuspend timer from elapsing during this operation
+	 * if possible.
 	 */
-	if (!drm_kms_helper_is_poll_worker()) {
-		ret = pm_runtime_get_sync(connector->dev->dev);
+	if (drm_kms_helper_is_poll_worker()) {
+		pm_runtime_get_noresume(dev->dev);
+	} else {
+		ret = pm_runtime_get_sync(dev->dev);
 		if (ret < 0 && ret != -EACCES)
 			return conn_status;
 	}
@@ -354,10 +358,8 @@
 
  out:
 
-	if (!drm_kms_helper_is_poll_worker()) {
-		pm_runtime_mark_last_busy(connector->dev->dev);
-		pm_runtime_put_autosuspend(connector->dev->dev);
-	}
+	pm_runtime_mark_last_busy(dev->dev);
+	pm_runtime_put_autosuspend(dev->dev);
 
 	return conn_status;
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 6526a33..3ddd409 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -367,8 +367,6 @@
 	pm_runtime_get_sync(drm->dev->dev);
 
 	drm_helper_hpd_irq_event(drm->dev);
-	/* enable polling for external displays */
-	drm_kms_helper_poll_enable(drm->dev);
 
 	pm_runtime_mark_last_busy(drm->dev->dev);
 	pm_runtime_put_sync(drm->dev->dev);
@@ -391,15 +389,29 @@
 {
 	struct nouveau_drm *drm = container_of(nb, typeof(*drm), acpi_nb);
 	struct acpi_bus_event *info = data;
+	int ret;
 
 	if (!strcmp(info->device_class, ACPI_VIDEO_CLASS)) {
 		if (info->type == ACPI_VIDEO_NOTIFY_PROBE) {
-			/*
-			 * This may be the only indication we receive of a
-			 * connector hotplug on a runtime suspended GPU,
-			 * schedule hpd_work to check.
-			 */
-			schedule_work(&drm->hpd_work);
+			ret = pm_runtime_get(drm->dev->dev);
+			if (ret == 1 || ret == -EACCES) {
+				/* If the GPU is already awake, or in a state
+				 * where we can't wake it up, it can handle
+				 * it's own hotplug events.
+				 * its own hotplug events.
+				pm_runtime_put_autosuspend(drm->dev->dev);
+			} else if (ret == 0) {
+				/* This may be the only indication we receive
+				 * of a connector hotplug on a runtime
+				 * suspended GPU, schedule hpd_work to check.
+				 */
+				NV_DEBUG(drm, "ACPI requested connector reprobe\n");
+				schedule_work(&drm->hpd_work);
+				pm_runtime_put_noidle(drm->dev->dev);
+			} else {
+				NV_WARN(drm, "Dropped ACPI reprobe event due to RPM error: %d\n",
+					ret);
+			}
 
 			/* acpi-video should not generate keypresses for this */
 			return NOTIFY_BAD;
@@ -422,6 +434,11 @@
 	if (ret)
 		return ret;
 
+	/* enable connector detection and polling for connectors without HPD
+	 * support
+	 */
+	drm_kms_helper_poll_enable(dev);
+
 	/* enable hotplug interrupts */
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct nouveau_connector *conn = nouveau_connector(connector);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
index a410c0d..6a1b81e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
@@ -161,7 +161,8 @@
 	}
 
 	/* load and execute some other ucode image (bios therm?) */
-	return pmu_load(init, 0x01, post, NULL, NULL);
+	pmu_load(init, 0x01, post, NULL, NULL);
+	return 0;
 }
 
 static const struct nvkm_devinit_func
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index aad2f4a..97828fa 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -283,7 +283,6 @@
 		remote = of_graph_get_remote_port_parent(ep);
 		if (!remote) {
 			DRM_DEBUG_DRIVER("Error retrieving the output node\n");
-			of_node_put(remote);
 			continue;
 		}
 
@@ -297,11 +296,13 @@
 
 			if (of_graph_parse_endpoint(ep, &endpoint)) {
 				DRM_DEBUG_DRIVER("Couldn't parse endpoint\n");
+				of_node_put(remote);
 				continue;
 			}
 
 			if (!endpoint.id) {
 				DRM_DEBUG_DRIVER("Endpoint is our panel... skipping\n");
+				of_node_put(remote);
 				continue;
 			}
 		}
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index f8c9f6f..a2d8630 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -327,6 +327,9 @@
 	vc4_state->y_scaling[0] = vc4_get_scaling_mode(vc4_state->src_h[0],
 						       vc4_state->crtc_h);
 
+	vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE &&
+			       vc4_state->y_scaling[0] == VC4_SCALING_NONE);
+
 	if (num_planes > 1) {
 		vc4_state->is_yuv = true;
 
@@ -342,24 +345,17 @@
 			vc4_get_scaling_mode(vc4_state->src_h[1],
 					     vc4_state->crtc_h);
 
-		/* YUV conversion requires that scaling be enabled,
-		 * even on a plane that's otherwise 1:1.  Choose TPZ
-		 * for simplicity.
+		/* YUV conversion requires that horizontal scaling be enabled,
+		 * even on a plane that's otherwise 1:1. Looks like only PPF
+		 * works in that case, so let's pick that one.
 		 */
-		if (vc4_state->x_scaling[0] == VC4_SCALING_NONE)
-			vc4_state->x_scaling[0] = VC4_SCALING_TPZ;
-		if (vc4_state->y_scaling[0] == VC4_SCALING_NONE)
-			vc4_state->y_scaling[0] = VC4_SCALING_TPZ;
+		if (vc4_state->is_unity)
+			vc4_state->x_scaling[0] = VC4_SCALING_PPF;
 	} else {
 		vc4_state->x_scaling[1] = VC4_SCALING_NONE;
 		vc4_state->y_scaling[1] = VC4_SCALING_NONE;
 	}
 
-	vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE &&
-			       vc4_state->y_scaling[0] == VC4_SCALING_NONE &&
-			       vc4_state->x_scaling[1] == VC4_SCALING_NONE &&
-			       vc4_state->y_scaling[1] == VC4_SCALING_NONE);
-
 	/* No configuring scaling on the cursor plane, since it gets
 	   non-vblank-synced updates, and scaling requires
 	   LBM changes which have to be vblank-synced.
@@ -614,7 +610,10 @@
 		vc4_dlist_write(vc4_state, SCALER_CSC2_ITR_R_601_5);
 	}
 
-	if (!vc4_state->is_unity) {
+	if (vc4_state->x_scaling[0] != VC4_SCALING_NONE ||
+	    vc4_state->x_scaling[1] != VC4_SCALING_NONE ||
+	    vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
+	    vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
 		/* LBM Base Address. */
 		if (vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
 		    vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
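
The vc4 hunks above record is_unity from plane 0 before the YUV adjustment, force horizontal PPF scaling for otherwise-1:1 YUV planes, and key the scaler programming off whether any channel scales at all. A self-contained C sketch of that decision order follows; setup_scaling and the needs_scaler return value are illustrative names, not the driver's.

#include <stdbool.h>
#include <stdio.h>

enum scaling_mode { SCALING_NONE, SCALING_TPZ, SCALING_PPF };

struct plane_scaling {
	enum scaling_mode x[2], y[2];	/* [0] = luma/RGB, [1] = chroma */
	bool is_unity;
};

/* Record unity from plane 0 first, then let YUV force horizontal scaling,
 * and finally report whether any channel needs the scaler programmed. */
static bool setup_scaling(struct plane_scaling *s, bool is_yuv)
{
	s->is_unity = (s->x[0] == SCALING_NONE && s->y[0] == SCALING_NONE);

	if (is_yuv && s->is_unity)
		s->x[0] = SCALING_PPF;

	return s->x[0] != SCALING_NONE || s->x[1] != SCALING_NONE ||
	       s->y[0] != SCALING_NONE || s->y[1] != SCALING_NONE;
}

int main(void)
{
	struct plane_scaling s = {
		{ SCALING_NONE, SCALING_NONE },
		{ SCALING_NONE, SCALING_NONE },
		false
	};
	bool needs_scaler = setup_scaling(&s, true);

	printf("is_unity=%d needs_scaler=%d\n", s.is_unity, needs_scaler);
	return 0;
}
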
diff --git a/drivers/gpu/msm/adreno_debugfs.c b/drivers/gpu/msm/adreno_debugfs.c
index 2a1d352..aef802c 100644
--- a/drivers/gpu/msm/adreno_debugfs.c
+++ b/drivers/gpu/msm/adreno_debugfs.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2008-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2008-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -137,8 +137,11 @@
 		break;
 	}
 	case KGSL_CMD_SYNCPOINT_TYPE_FENCE: {
-		seq_printf(s, "sync: [%pK] %s", sync_event->handle,
-				sync_event->fence_name);
+		int i;
+
+		for (i = 0; i < sync_event->info.num_fences; i++)
+			seq_printf(s, "sync: %s",
+				sync_event->info.fences[i].name);
 		break;
 	}
 	default:
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index 6876796..a769915 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -472,6 +472,7 @@
 {
 	struct kgsl_device *device;
 	struct adreno_device *adreno_dev;
+	struct adreno_gpudev *gpudev;
 	struct adreno_context *drawctxt;
 	struct adreno_ringbuffer *rb;
 	int ret, count, i;
@@ -482,6 +483,7 @@
 
 	device = context->device;
 	adreno_dev = ADRENO_DEVICE(device);
+	gpudev = ADRENO_GPU_DEVICE(adreno_dev);
 	drawctxt = ADRENO_CONTEXT(context);
 	rb = drawctxt->rb;
 
@@ -562,6 +564,9 @@
 
 	mutex_unlock(&device->mutex);
 
+	if (gpudev->preemption_context_destroy)
+		gpudev->preemption_context_destroy(context);
+
 	/* wake threads waiting to submit commands from this context */
 	wake_up_all(&drawctxt->waiting);
 	wake_up_all(&drawctxt->wq);
@@ -570,18 +575,10 @@
 void adreno_drawctxt_destroy(struct kgsl_context *context)
 {
 	struct adreno_context *drawctxt;
-	struct adreno_device *adreno_dev;
-	struct adreno_gpudev *gpudev;
 
 	if (context == NULL)
 		return;
 
-	adreno_dev = ADRENO_DEVICE(context->device);
-	gpudev = ADRENO_GPU_DEVICE(adreno_dev);
-
-	if (gpudev->preemption_context_destroy)
-		gpudev->preemption_context_destroy(context);
-
 	drawctxt = ADRENO_CONTEXT(context);
 	kfree(drawctxt);
 }
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index eb4c29d..aba9ed0 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -2018,7 +2018,7 @@
 	}
 
 	handle = kgsl_sync_fence_async_wait(event.fd,
-		gpuobj_free_fence_func, entry, NULL, 0);
+		gpuobj_free_fence_func, entry, NULL);
 
 	if (IS_ERR(handle)) {
 		kgsl_mem_entry_unset_pend(entry);
diff --git a/drivers/gpu/msm/kgsl_drawobj.c b/drivers/gpu/msm/kgsl_drawobj.c
index 3dbaea4..a1e0f4c 100644
--- a/drivers/gpu/msm/kgsl_drawobj.c
+++ b/drivers/gpu/msm/kgsl_drawobj.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -43,6 +43,17 @@
 static struct kmem_cache *memobjs_cache;
 static struct kmem_cache *sparseobjs_cache;
 
+static void free_fence_names(struct kgsl_drawobj_sync *syncobj)
+{
+	unsigned int i;
+
+	for (i = 0; i < syncobj->numsyncs; i++) {
+		struct kgsl_drawobj_sync_event *event = &syncobj->synclist[i];
+
+		if (event->type == KGSL_CMD_SYNCPOINT_TYPE_FENCE)
+			kfree(event->info.fences);
+	}
+}
 
 void kgsl_drawobj_destroy_object(struct kref *kref)
 {
@@ -55,6 +66,7 @@
 	switch (drawobj->type) {
 	case SYNCOBJ_TYPE:
 		syncobj = SYNCOBJ(drawobj);
+		free_fence_names(syncobj);
 		kfree(syncobj->synclist);
 		kfree(syncobj);
 		break;
@@ -94,11 +106,16 @@
 				retired);
 			break;
 		}
-		case KGSL_CMD_SYNCPOINT_TYPE_FENCE:
-			dev_err(device->dev, "  fence: %s\n",
-					event->fence_name);
+		case KGSL_CMD_SYNCPOINT_TYPE_FENCE: {
+			int j;
+			struct event_fence_info *info = &event->info;
+
+			for (j = 0; j < info->num_fences; j++)
+				dev_err(device->dev, "[%d]  fence: %s\n",
+					i, info->fences[j].name);
 			break;
 		}
+		}
 	}
 }
 
@@ -146,11 +163,16 @@
 			dev_err(device->dev, "       [%d] TIMESTAMP %d:%d\n",
 				i, event->context->id, event->timestamp);
 			break;
-		case KGSL_CMD_SYNCPOINT_TYPE_FENCE:
-			dev_err(device->dev, "       [%d] FENCE %s\n",
-					i, event->fence_name);
+		case KGSL_CMD_SYNCPOINT_TYPE_FENCE: {
+			int j;
+			struct event_fence_info *info = &event->info;
+
+			for (j = 0; j < info->num_fences; j++)
+				dev_err(device->dev, "       [%d] FENCE %s\n",
+					i, info->fences[j].name);
 			break;
 		}
+		}
 	}
 
 	kgsl_drawobj_put(drawobj);
@@ -332,8 +354,11 @@
 static bool drawobj_sync_fence_func(void *priv)
 {
 	struct kgsl_drawobj_sync_event *event = priv;
+	int i;
 
-	trace_syncpoint_fence_expire(event->syncobj, event->fence_name);
+	for (i = 0; i < event->info.num_fences; i++)
+		trace_syncpoint_fence_expire(event->syncobj,
+			event->info.fences[i].name);
 
 	/*
 	 * Only call kgsl_drawobj_put() if it's not marked for cancellation
@@ -359,7 +384,7 @@
 	struct kgsl_cmd_syncpoint_fence *sync = priv;
 	struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj);
 	struct kgsl_drawobj_sync_event *event;
-	unsigned int id;
+	unsigned int id, i;
 
 	kref_get(&drawobj->refcount);
 
@@ -377,7 +402,7 @@
 
 	event->handle = kgsl_sync_fence_async_wait(sync->fd,
 				drawobj_sync_fence_func, event,
-				event->fence_name, sizeof(event->fence_name));
+				&event->info);
 
 	if (IS_ERR_OR_NULL(event->handle)) {
 		int ret = PTR_ERR(event->handle);
@@ -397,7 +422,8 @@
 		return ret;
 	}
 
-	trace_syncpoint_fence(syncobj, event->fence_name);
+	for (i = 0; i < event->info.num_fences; i++)
+		trace_syncpoint_fence(syncobj, event->info.fences[i].name);
 
 	return 0;
 }
diff --git a/drivers/gpu/msm/kgsl_drawobj.h b/drivers/gpu/msm/kgsl_drawobj.h
index 06eef7f..bd32f5e 100644
--- a/drivers/gpu/msm/kgsl_drawobj.h
+++ b/drivers/gpu/msm/kgsl_drawobj.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -107,6 +107,15 @@
 
 #define KGSL_FENCE_NAME_LEN 74
 
+struct fence_info {
+	char name[KGSL_FENCE_NAME_LEN];
+};
+
+struct event_fence_info {
+	struct fence_info *fences;
+	int num_fences;
+};
+
 /**
  * struct kgsl_drawobj_sync_event
  * @id: identifier (position within the pending bitmap)
@@ -116,8 +125,8 @@
  *           register this event
  * @timestamp: Pending timestamp for the event
  * @handle: Pointer to a sync fence handle
- * @fence_name: A fence name string to describe the fence
  * @device: Pointer to the KGSL device
+ * @info: structure to hold info about the fence
  */
 struct kgsl_drawobj_sync_event {
 	unsigned int id;
@@ -126,8 +135,8 @@
 	struct kgsl_context *context;
 	unsigned int timestamp;
 	struct kgsl_sync_fence_cb *handle;
-	char fence_name[KGSL_FENCE_NAME_LEN];
 	struct kgsl_device *device;
+	struct event_fence_info info;
 };
 
 /**
diff --git a/drivers/gpu/msm/kgsl_sync.c b/drivers/gpu/msm/kgsl_sync.c
index d484894..98c117d 100644
--- a/drivers/gpu/msm/kgsl_sync.c
+++ b/drivers/gpu/msm/kgsl_sync.c
@@ -438,27 +438,54 @@
 	}
 }
 
-static void kgsl_get_fence_name(struct fence *fence,
-	char *fence_name, int name_len)
+static void kgsl_get_fence_names(struct fence *fence,
+	struct event_fence_info *info_ptr)
 {
-	char *ptr = fence_name;
-	char *last = fence_name + name_len;
+	unsigned int num_fences;
+	struct fence **fences;
+	struct fence_array *array;
+	int i;
 
-	ptr +=  snprintf(ptr, last - ptr, "%s %s",
-			fence->ops->get_driver_name(fence),
-			fence->ops->get_timeline_name(fence));
-
-	if ((ptr + 2) >= last)
+	if (!info_ptr)
 		return;
 
-	if (fence->ops->fence_value_str) {
-		ptr += snprintf(ptr, last - ptr, ": ");
-		fence->ops->fence_value_str(fence, ptr, last - ptr);
+	array = to_fence_array(fence);
+
+	if (array != NULL) {
+		num_fences = array->num_fences;
+		fences = array->fences;
+	} else {
+		num_fences = 1;
+		fences = &fence;
+	}
+
+	info_ptr->fences = kcalloc(num_fences, sizeof(struct fence_info),
+			GFP_ATOMIC);
+	if (info_ptr->fences == NULL)
+		return;
+
+	info_ptr->num_fences = num_fences;
+
+	for (i = 0; i < num_fences; i++) {
+		struct fence *f = fences[i];
+		struct fence_info *fi = &info_ptr->fences[i];
+		int len;
+
+		len =  scnprintf(fi->name, sizeof(fi->name), "%s %s",
+			f->ops->get_driver_name(f),
+			f->ops->get_timeline_name(f));
+
+		if (f->ops->fence_value_str) {
+			len += scnprintf(fi->name + len, sizeof(fi->name) - len,
+				": ");
+			f->ops->fence_value_str(f, fi->name + len,
+				sizeof(fi->name) - len);
+		}
 	}
 }
 
 struct kgsl_sync_fence_cb *kgsl_sync_fence_async_wait(int fd,
-	bool (*func)(void *priv), void *priv, char *fence_name, int name_len)
+	bool (*func)(void *priv), void *priv, struct event_fence_info *info_ptr)
 {
 	struct kgsl_sync_fence_cb *kcb;
 	struct fence *fence;
@@ -479,8 +506,7 @@
 	kcb->priv = priv;
 	kcb->func = func;
 
-	if (fence_name)
-		kgsl_get_fence_name(fence, fence_name, name_len);
+	kgsl_get_fence_names(fence, info_ptr);
 
 	/* if status then error or signaled */
 	status = fence_add_callback(fence, &kcb->fence_cb,
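
kgsl_get_fence_names above allocates one fence_info slot per fence (single fence or fence array) and formats each name with bounded writes. Below is a userspace C sketch of the same bookkeeping; the struct fields, the seqno suffix, and the sample data are assumptions chosen only to keep the example self-contained.

#include <stdio.h>
#include <stdlib.h>

#define FENCE_NAME_LEN 74

struct fence { const char *driver; const char *timeline; unsigned int seqno; };
struct fence_info { char name[FENCE_NAME_LEN]; };
struct event_fence_info { struct fence_info *fences; int num_fences; };

/* One name slot per fence, each built with bounded formatting so a long
 * driver/timeline pair cannot overflow the fixed-size buffer. */
static int get_fence_names(struct fence *fences, int n,
			   struct event_fence_info *info)
{
	int i;

	info->fences = calloc(n, sizeof(*info->fences));
	if (!info->fences)
		return -1;
	info->num_fences = n;

	for (i = 0; i < n; i++) {
		struct fence_info *fi = &info->fences[i];
		int len = snprintf(fi->name, sizeof(fi->name), "%s %s",
				   fences[i].driver, fences[i].timeline);

		if (len < (int)sizeof(fi->name))
			snprintf(fi->name + len, sizeof(fi->name) - len,
				 ": %u", fences[i].seqno);
	}
	return 0;
}

int main(void)
{
	struct fence f[2] = { { "kgsl-timeline", "ctx-7", 41 },
			      { "display", "crtc-0", 1092 } };
	struct event_fence_info info = { 0, 0 };

	if (!get_fence_names(f, 2, &info)) {
		for (int i = 0; i < info.num_fences; i++)
			printf("fence: %s\n", info.fences[i].name);
		free(info.fences);
	}
	return 0;
}
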
diff --git a/drivers/gpu/msm/kgsl_sync.h b/drivers/gpu/msm/kgsl_sync.h
index 6998b40..a53cd51 100644
--- a/drivers/gpu/msm/kgsl_sync.h
+++ b/drivers/gpu/msm/kgsl_sync.h
@@ -1,3 +1,4 @@
+
 /* Copyright (c) 2012-2014,2017-2018 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
@@ -93,7 +94,7 @@
 
 struct kgsl_sync_fence_cb *kgsl_sync_fence_async_wait(int fd,
 					bool (*func)(void *priv), void *priv,
-					char *fence_name, int name_len);
+					struct event_fence_info *info_ptr);
 
 void kgsl_sync_fence_async_cancel(struct kgsl_sync_fence_cb *kcb);
 
@@ -139,7 +140,7 @@
 
 static inline struct kgsl_sync_fence_cb *kgsl_sync_fence_async_wait(int fd,
 					bool (*func)(void *priv), void *priv,
-					char *fence_name, int name_len)
+					struct event_fence_info *info_ptr)
 {
 	return NULL;
 }
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index cb2e85c..a8b8058 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -333,7 +333,8 @@
 		struct hid_field *field, struct hid_usage *usage,
 		unsigned long **bit, int *max)
 {
-	if (usage->hid == (HID_UP_CUSTOM | 0x0003)) {
+	if (usage->hid == (HID_UP_CUSTOM | 0x0003) ||
+			usage->hid == (HID_UP_MSVENDOR | 0x0003)) {
 		/* The fn key on Apple USB keyboards */
 		set_bit(EV_REP, hi->input->evbit);
 		hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_FN);
@@ -479,6 +480,12 @@
 		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI),
 		.driver_data = APPLE_HAS_FN },
+	{ HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI),
+		.driver_data = APPLE_HAS_FN },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI),
+		.driver_data = APPLE_HAS_FN },
+	{ HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI),
+		.driver_data = APPLE_HAS_FN },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI),
 		.driver_data = APPLE_HAS_FN },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO),
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 2059d9d..48856a0 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -2064,6 +2064,10 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
+	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
+	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 0280e28..a784464 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -83,6 +83,7 @@
 #define USB_DEVICE_ID_ANTON_TOUCH_PAD	0x3101
 
 #define USB_VENDOR_ID_APPLE		0x05ac
+#define BT_VENDOR_ID_APPLE		0x004c
 #define USB_DEVICE_ID_APPLE_MIGHTYMOUSE	0x0304
 #define USB_DEVICE_ID_APPLE_MAGICMOUSE	0x030d
 #define USB_DEVICE_ID_APPLE_MAGICTRACKPAD	0x030e
@@ -152,6 +153,7 @@
 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO   0x0256
 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS   0x0257
 #define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI   0x0267
+#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI   0x026c
 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI	0x0290
 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO	0x0291
 #define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS	0x0292
@@ -888,6 +890,7 @@
 #define USB_DEVICE_ID_SAITEK_RUMBLEPAD	0xff17
 #define USB_DEVICE_ID_SAITEK_PS1000	0x0621
 #define USB_DEVICE_ID_SAITEK_RAT7_OLD	0x0ccb
+#define USB_DEVICE_ID_SAITEK_RAT7_CONTAGION	0x0ccd
 #define USB_DEVICE_ID_SAITEK_RAT7	0x0cd7
 #define USB_DEVICE_ID_SAITEK_RAT9	0x0cfa
 #define USB_DEVICE_ID_SAITEK_MMO7	0x0cd0
@@ -927,6 +930,8 @@
 #define USB_DEVICE_ID_SONY_PS3_BDREMOTE		0x0306
 #define USB_DEVICE_ID_SONY_PS3_CONTROLLER	0x0268
 #define USB_DEVICE_ID_SONY_PS4_CONTROLLER	0x05c4
+#define USB_DEVICE_ID_SONY_PS4_CONTROLLER_2	0x09cc
+#define USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE	0x0ba0
 #define USB_DEVICE_ID_SONY_MOTION_CONTROLLER	0x03d5
 #define USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER	0x042f
 #define USB_DEVICE_ID_SONY_BUZZ_CONTROLLER		0x0002
diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c
index 1b0084d..28373da 100644
--- a/drivers/hid/hid-ntrig.c
+++ b/drivers/hid/hid-ntrig.c
@@ -955,6 +955,8 @@
 
 	ret = sysfs_create_group(&hdev->dev.kobj,
 			&ntrig_attribute_group);
+	if (ret)
+		hid_err(hdev, "cannot create sysfs group\n");
 
 	return 0;
 err_free:
diff --git a/drivers/hid/hid-saitek.c b/drivers/hid/hid-saitek.c
index 39e6426..683861f 100644
--- a/drivers/hid/hid-saitek.c
+++ b/drivers/hid/hid-saitek.c
@@ -183,6 +183,8 @@
 		.driver_data = SAITEK_RELEASE_MODE_RAT7 },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7),
 		.driver_data = SAITEK_RELEASE_MODE_RAT7 },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7_CONTAGION),
+		.driver_data = SAITEK_RELEASE_MODE_RAT7 },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT9),
 		.driver_data = SAITEK_RELEASE_MODE_RAT7 },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9),
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 1b1dccd..eee58d1 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -2581,6 +2581,12 @@
 		.driver_data = DUALSHOCK4_CONTROLLER_USB },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER),
 		.driver_data = DUALSHOCK4_CONTROLLER_BT },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2),
+		.driver_data = DUALSHOCK4_CONTROLLER_USB },
+	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2),
+		.driver_data = DUALSHOCK4_CONTROLLER_BT },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE),
+		.driver_data = DUALSHOCK4_CONTROLLER_USB },
 	/* Nyko Core Controller for PS3 */
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER),
 		.driver_data = SIXAXIS_CONTROLLER_USB | SINO_LITE_CONTROLLER },
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index 3cefd1a..9c262d9 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -274,14 +274,18 @@
 	return clamp_val(reg, 0, 1023) & (0xff << 2);
 }
 
-static u16 adt7475_read_word(struct i2c_client *client, int reg)
+static int adt7475_read_word(struct i2c_client *client, int reg)
 {
-	u16 val;
+	int val1, val2;
 
-	val = i2c_smbus_read_byte_data(client, reg);
-	val |= (i2c_smbus_read_byte_data(client, reg + 1) << 8);
+	val1 = i2c_smbus_read_byte_data(client, reg);
+	if (val1 < 0)
+		return val1;
+	val2 = i2c_smbus_read_byte_data(client, reg + 1);
+	if (val2 < 0)
+		return val2;
 
-	return val;
+	return val1 | (val2 << 8);
 }
 
 static void adt7475_write_word(struct i2c_client *client, int reg, u16 val)
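
The adt7475 hunk above turns the word read into an int-returning helper so that a failing byte read propagates an error instead of being OR-ed into the result. A minimal C sketch of that pattern, with read_byte standing in for i2c_smbus_read_byte_data(), is shown below; the fake register contents are illustrative.

#include <stdio.h>

/* Stand-in for i2c_smbus_read_byte_data(): returns 0..255 or a negative
 * error code. */
static int read_byte(int reg)
{
	if (reg > 0xff)
		return -5;		/* illustrative -EIO */
	return reg & 0xff;		/* fake register contents */
}

/* Either byte failing propagates the error instead of corrupting the
 * 16-bit result. */
static int read_word(int reg)
{
	int lo = read_byte(reg);

	if (lo < 0)
		return lo;

	int hi = read_byte(reg + 1);

	if (hi < 0)
		return hi;
	return lo | (hi << 8);
}

int main(void)
{
	int v = read_word(0x40);

	if (v < 0)
		printf("read failed: %d\n", v);
	else
		printf("word = 0x%04x\n", v);
	return 0;
}
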
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index ac63e56..9ac6e16 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -17,7 +17,7 @@
  * Bi-directional Current/Power Monitor with I2C Interface
  * Datasheet: http://www.ti.com/product/ina230
  *
- * Copyright (C) 2012 Lothar Felten <l-felten@ti.com>
+ * Copyright (C) 2012 Lothar Felten <lothar.felten@gmail.com>
  * Thanks to Jan Volkering
  *
  * This program is free software; you can redistribute it and/or modify
@@ -328,6 +328,15 @@
 	return 0;
 }
 
+static ssize_t ina2xx_show_shunt(struct device *dev,
+			      struct device_attribute *da,
+			      char *buf)
+{
+	struct ina2xx_data *data = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%li\n", data->rshunt);
+}
+
 static ssize_t ina2xx_store_shunt(struct device *dev,
 				  struct device_attribute *da,
 				  const char *buf, size_t count)
@@ -402,7 +411,7 @@
 
 /* shunt resistance */
 static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR,
-			  ina2xx_show_value, ina2xx_store_shunt,
+			  ina2xx_show_shunt, ina2xx_store_shunt,
 			  INA2XX_CALIBRATION);
 
 /* update interval (ina226 only) */
diff --git a/drivers/hwmon/qpnp-adc-voltage.c b/drivers/hwmon/qpnp-adc-voltage.c
index 5399670..d289c9d 100644
--- a/drivers/hwmon/qpnp-adc-voltage.c
+++ b/drivers/hwmon/qpnp-adc-voltage.c
@@ -34,6 +34,7 @@
 #include <linux/platform_device.h>
 #include <linux/power_supply.h>
 #include <linux/thermal.h>
+#include <linux/qpnp/qpnp-revid.h>
 
 /* QPNP VADC register definition */
 #define QPNP_VADC_REVISION1				0x0
@@ -203,6 +204,7 @@
 	struct power_supply		*vadc_chg_vote;
 	bool				vadc_hc;
 	int				vadc_debug_count;
+	struct pmic_revid_data		*pmic_rev_id;
 	struct sensor_device_attribute	sens_attr[0];
 };
 
@@ -465,6 +467,44 @@
 	pr_debug("VADC_DIG_PARAM value:0x%x\n", *data);
 }
 
+static int qpnp_vadc_channel_check(struct qpnp_vadc_chip *vadc, u8 buf)
+{
+	int rc = 0;
+	u8 chno = 0;
+
+	rc = qpnp_vadc_read_reg(vadc,
+		QPNP_VADC_HC1_ADC_CH_SEL_CTL, &chno, 1);
+	if (rc < 0) {
+		pr_err("Channel reread failed\n");
+		return rc;
+	}
+
+	if (buf != chno) {
+		pr_debug("channel write fails once: written:0x%x actual:0x%x\n",
+			buf, chno);
+
+		rc = qpnp_vadc_write_reg(vadc,
+			QPNP_VADC_HC1_ADC_CH_SEL_CTL, &buf, 1);
+		if (rc < 0) {
+			pr_err("qpnp adc register configure failed\n");
+			return rc;
+		}
+
+		rc = qpnp_vadc_read_reg(vadc,
+			QPNP_VADC_HC1_ADC_CH_SEL_CTL, &chno, 1);
+		if (rc < 0) {
+			pr_err("qpnp adc configure read failed\n");
+			return rc;
+		}
+
+		if (chno != buf) {
+			pr_err("Write fails twice: written: 0x%x\n", chno);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
 static int qpnp_vadc_hc_pre_configure_usb_in(struct qpnp_vadc_chip *vadc,
 						int dt_index)
 {
@@ -472,6 +512,11 @@
 	u8 buf;
 	u8 dig_param = 0;
 	struct qpnp_adc_amux_properties conv;
+	bool channel_check = false;
+
+	if (vadc->pmic_rev_id)
+		if (vadc->pmic_rev_id->pmic_subtype == PMI632_SUBTYPE)
+			channel_check = true;
 
 	/* Setup dig params for USB_IN_V */
 	conv.decimation = DECIMATION_TYPE2;
@@ -496,6 +541,12 @@
 	if (rc < 0)
 		return rc;
 
+	if (channel_check) {
+		rc = qpnp_vadc_channel_check(vadc, buf);
+		if (rc)
+			return rc;
+	}
+
 	buf = QPNP_VADC_HC1_ADC_EN;
 	rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_HC1_EN_CTL1, &buf, 1);
 	if (rc < 0)
@@ -520,6 +571,12 @@
 	if (rc < 0)
 		return rc;
 
+	if (channel_check) {
+		rc = qpnp_vadc_channel_check(vadc, buf);
+		if (rc)
+			return rc;
+	}
+
 	/* Wait for GND read to complete */
 	rc = qpnp_vadc_wait_for_eoc(vadc);
 	if (rc < 0)
@@ -541,10 +598,16 @@
 				struct qpnp_adc_amux_properties *amux_prop)
 {
 	int rc = 0;
-	u8 buf[6];
+	u8 buf[5];
+	u8 conv_req = 0;
+	bool channel_check = false;
+
+	if (vadc->pmic_rev_id)
+		if (vadc->pmic_rev_id->pmic_subtype == PMI632_SUBTYPE)
+			channel_check = true;
 
 	/* Read registers 0x42 through 0x46 */
-	rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_HC1_ADC_DIG_PARAM, buf, 6);
+	rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_HC1_ADC_DIG_PARAM, buf, 5);
 	if (rc < 0) {
 		pr_err("qpnp adc configure block read failed\n");
 		return rc;
@@ -568,7 +631,7 @@
 	buf[4] |= QPNP_VADC_HC1_ADC_EN;
 
 	/* Select CONV request */
-	buf[5] |= QPNP_VADC_HC1_CONV_REQ_START;
+	conv_req = QPNP_VADC_HC1_CONV_REQ_START;
 
 	if (!vadc->vadc_poll_eoc)
 		reinit_completion(&vadc->adc->adc_rslt_completion);
@@ -577,7 +640,20 @@
 		buf[0], buf[1], buf[2], buf[3]);
 
 	/* Block register write from 0x42 through 0x46 */
-	rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_HC1_ADC_DIG_PARAM, buf, 6);
+	rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_HC1_ADC_DIG_PARAM, buf, 5);
+	if (rc < 0) {
+		pr_err("qpnp adc block register configure failed\n");
+		return rc;
+	}
+
+	if (channel_check) {
+		rc = qpnp_vadc_channel_check(vadc, buf[2]);
+		if (rc)
+			return rc;
+	}
+
+	rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_HC1_CONV_REQ,
+						&conv_req, 1);
 	if (rc < 0) {
 		pr_err("qpnp adc block register configure failed\n");
 		return rc;
@@ -2711,7 +2787,7 @@
 	struct qpnp_vadc_chip *vadc;
 	struct qpnp_adc_drv *adc_qpnp;
 	struct qpnp_vadc_thermal_data *adc_thermal;
-	struct device_node *node = pdev->dev.of_node;
+	struct device_node *node = pdev->dev.of_node, *revid_dev_node;
 	struct device_node *child;
 	const struct of_device_id *id;
 	int rc, count_adc_channel_list = 0, i = 0;
@@ -2765,6 +2841,16 @@
 		return -ENOMEM;
 	}
 
+	revid_dev_node = of_parse_phandle(node, "qcom,pmic-revid", 0);
+	if (revid_dev_node) {
+		vadc->pmic_rev_id = get_revid_data(revid_dev_node);
+		if (IS_ERR(vadc->pmic_rev_id)) {
+			pr_err("Unable to get revid\n");
+			vadc->pmic_rev_id = NULL;
+		}
+		of_node_put(revid_dev_node);
+	}
+
 	vadc->vadc_therm_chan = adc_thermal;
 	if (!strcmp(id->compatible, "qcom,qpnp-vadc-hc")) {
 		vadc->vadc_hc = true;
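
qpnp_vadc_channel_check above reads back the channel-select register, rewrites it once if the first write did not stick, and only fails after a second mismatch. The following userspace C sketch mirrors that write-verify-retry flow; the flaky register model and the helper names are assumptions for illustration only.

#include <stdint.h>
#include <stdio.h>

static uint8_t ch_sel_reg;		/* stands in for ADC_CH_SEL_CTL */
static int flaky_writes_left = 1;	/* first write is silently dropped */

static int write_reg(uint8_t val)
{
	if (flaky_writes_left-- > 0)
		return 0;		/* "succeeds" but is not latched */
	ch_sel_reg = val;
	return 0;
}

static int read_reg(uint8_t *val)
{
	*val = ch_sel_reg;
	return 0;
}

/* Read back the channel select, rewrite once on mismatch, and give up if
 * the second readback still disagrees. */
static int channel_check(uint8_t wanted)
{
	uint8_t got;

	if (read_reg(&got) < 0)
		return -1;
	if (got == wanted)
		return 0;

	if (write_reg(wanted) < 0 || read_reg(&got) < 0)
		return -1;
	return (got == wanted) ? 0 : -1;
}

int main(void)
{
	uint8_t channel = 0x83;

	write_reg(channel);		/* initial programming, may not stick */
	printf("channel_check: %s\n",
	       channel_check(channel) ? "failed twice" : "ok");
	return 0;
}
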
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 663017f..26f1691 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -1408,6 +1408,13 @@
 }
 
 #ifdef CONFIG_ACPI
+static bool i801_acpi_is_smbus_ioport(const struct i801_priv *priv,
+				      acpi_physical_address address)
+{
+	return address >= priv->smba &&
+	       address <= pci_resource_end(priv->pci_dev, SMBBAR);
+}
+
 static acpi_status
 i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits,
 		     u64 *value, void *handler_context, void *region_context)
@@ -1423,7 +1430,7 @@
 	 */
 	mutex_lock(&priv->acpi_lock);
 
-	if (!priv->acpi_reserved) {
+	if (!priv->acpi_reserved && i801_acpi_is_smbus_ioport(priv, address)) {
 		priv->acpi_reserved = true;
 
 		dev_warn(&pdev->dev, "BIOS is accessing SMBus registers\n");
diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c
index 7aa7b9c..efefcfa 100644
--- a/drivers/i2c/busses/i2c-scmi.c
+++ b/drivers/i2c/busses/i2c-scmi.c
@@ -152,6 +152,7 @@
 			mt_params[3].type = ACPI_TYPE_INTEGER;
 			mt_params[3].integer.value = len;
 			mt_params[4].type = ACPI_TYPE_BUFFER;
+			mt_params[4].buffer.length = len;
 			mt_params[4].buffer.pointer = data->block + 1;
 		}
 		break;
diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c
index db9105e..0da4991 100644
--- a/drivers/i2c/busses/i2c-uniphier-f.c
+++ b/drivers/i2c/busses/i2c-uniphier-f.c
@@ -400,11 +400,8 @@
 		return ret;
 
 	for (msg = msgs; msg < emsg; msg++) {
-		/* If next message is read, skip the stop condition */
-		bool stop = !(msg + 1 < emsg && msg[1].flags & I2C_M_RD);
-		/* but, force it if I2C_M_STOP is set */
-		if (msg->flags & I2C_M_STOP)
-			stop = true;
+		/* Emit STOP if it is the last message or I2C_M_STOP is set. */
+		bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP);
 
 		ret = uniphier_fi2c_master_xfer_one(adap, msg, stop);
 		if (ret)
diff --git a/drivers/i2c/busses/i2c-uniphier.c b/drivers/i2c/busses/i2c-uniphier.c
index 56e92af..fdfcee92 100644
--- a/drivers/i2c/busses/i2c-uniphier.c
+++ b/drivers/i2c/busses/i2c-uniphier.c
@@ -247,11 +247,8 @@
 		return ret;
 
 	for (msg = msgs; msg < emsg; msg++) {
-		/* If next message is read, skip the stop condition */
-		bool stop = !(msg + 1 < emsg && msg[1].flags & I2C_M_RD);
-		/* but, force it if I2C_M_STOP is set */
-		if (msg->flags & I2C_M_STOP)
-			stop = true;
+		/* Emit STOP if it is the last message or I2C_M_STOP is set. */
+		bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP);
 
 		ret = uniphier_i2c_master_xfer_one(adap, msg, stop);
 		if (ret)
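
Both uniphier hunks replace the old "skip STOP before a read" heuristic with a simpler rule: STOP on the last message of the transfer, or whenever I2C_M_STOP is set. A small C sketch of that rule follows; the I2C_M_* values match the usual Linux uapi definitions but are restated here only to keep the example self-contained.

#include <stdbool.h>
#include <stdio.h>

#define I2C_M_RD	0x0001
#define I2C_M_STOP	0x8000

struct i2c_msg { unsigned short flags; };

/* STOP only on the last message of the transfer, or when the caller forces
 * it with I2C_M_STOP. */
static bool want_stop(const struct i2c_msg *msg, const struct i2c_msg *emsg)
{
	return (msg + 1 == emsg) || (msg->flags & I2C_M_STOP);
}

int main(void)
{
	struct i2c_msg msgs[] = { { 0 }, { I2C_M_RD }, { I2C_M_RD } };
	const struct i2c_msg *emsg = msgs + 3;

	for (const struct i2c_msg *m = msgs; m < emsg; m++)
		printf("msg %ld: stop=%d\n", (long)(m - msgs),
		       want_stop(m, emsg));
	return 0;
}
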
diff --git a/drivers/iio/imu/Kconfig b/drivers/iio/imu/Kconfig
index 0a8b763..770af2f 100644
--- a/drivers/iio/imu/Kconfig
+++ b/drivers/iio/imu/Kconfig
@@ -40,6 +40,8 @@
 
 source "drivers/iio/imu/inv_mpu6050/Kconfig"
 source "drivers/iio/imu/inv_icm20602/Kconfig"
+source "drivers/iio/imu/inv_mpu/Kconfig"
+source "drivers/iio/imu/st_asm330lhh/Kconfig"
 
 endmenu
 
diff --git a/drivers/iio/imu/Makefile b/drivers/iio/imu/Makefile
index fab6a5e..c70e984 100644
--- a/drivers/iio/imu/Makefile
+++ b/drivers/iio/imu/Makefile
@@ -14,6 +14,8 @@
 obj-$(CONFIG_IIO_ADIS_LIB) += adis_lib.o
 
 obj-y += bmi160/
+obj-y += inv_mpu/
+obj-y += st_asm330lhh/
 obj-y += inv_mpu6050/
 obj-y += inv_icm20602/
 
diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
index dbfd854..1d90a12 100644
--- a/drivers/infiniband/core/rw.c
+++ b/drivers/infiniband/core/rw.c
@@ -87,7 +87,7 @@
 	}
 
 	ret = ib_map_mr_sg(reg->mr, sg, nents, &offset, PAGE_SIZE);
-	if (ret < nents) {
+	if (ret < 0 || ret < nents) {
 		ib_mr_pool_put(qp, &qp->rdma_mrs, reg->mr);
 		return -EINVAL;
 	}
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 3bef6d4..fa9ef8e 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -124,6 +124,8 @@
 static DEFINE_IDR(ctx_idr);
 static DEFINE_IDR(multicast_idr);
 
+static const struct file_operations ucma_fops;
+
 static inline struct ucma_context *_ucma_find_context(int id,
 						      struct ucma_file *file)
 {
@@ -1545,6 +1547,10 @@
 	f = fdget(cmd.fd);
 	if (!f.file)
 		return -ENOENT;
+	if (f.file->f_op != &ucma_fops) {
+		ret = -EINVAL;
+		goto file_put;
+	}
 
 	/* Validate current fd and prevent destruction of id. */
 	ctx = ucma_get_ctx(f.file->private_data, cmd.id);
@@ -1714,6 +1720,8 @@
 		mutex_lock(&mut);
 		if (!ctx->closing) {
 			mutex_unlock(&mut);
+			ucma_put_ctx(ctx);
+			wait_for_completion(&ctx->comp);
 			/* rdma_destroy_id ensures that no event handlers are
 			 * inflight for that id before releasing it.
 			 */
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index cc2243f..bb45eb2 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1258,6 +1258,12 @@
 
 	t4_set_wq_in_error(&qhp->wq);
 	if (qhp->ibqp.uobject) {
+
+		/* for user qps, qhp->wq.flushed is protected by qhp->mutex */
+		if (qhp->wq.flushed)
+			return;
+
+		qhp->wq.flushed = 1;
 		t4_set_cq_in_error(&rchp->cq);
 		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
 		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index d89b874..c2982bb 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -88,6 +88,7 @@
 	unsigned long flags;
 	int write = 1;	/* write sendctrl back */
 	int flush = 0;	/* re-read sendctrl to make sure it is flushed */
+	int i;
 
 	spin_lock_irqsave(&dd->sendctrl_lock, flags);
 
@@ -97,9 +98,13 @@
 		reg |= SEND_CTRL_SEND_ENABLE_SMASK;
 	/* Fall through */
 	case PSC_DATA_VL_ENABLE:
+		mask = 0;
+		for (i = 0; i < ARRAY_SIZE(dd->vld); i++)
+			if (!dd->vld[i].mtu)
+				mask |= BIT_ULL(i);
 		/* Disallow sending on VLs not enabled */
-		mask = (((~0ull) << num_vls) & SEND_CTRL_UNSUPPORTED_VL_MASK) <<
-				SEND_CTRL_UNSUPPORTED_VL_SHIFT;
+		mask = (mask & SEND_CTRL_UNSUPPORTED_VL_MASK) <<
+			SEND_CTRL_UNSUPPORTED_VL_SHIFT;
 		reg = (reg & ~SEND_CTRL_UNSUPPORTED_VL_SMASK) | mask;
 		break;
 	case PSC_GLOBAL_DISABLE:
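
The pio.c hunk above builds the unsupported-VL mask from which VLs actually have an MTU configured, rather than blanket-disabling everything above num_vls. A short C sketch of that mask construction follows; the MTU table and NUM_VLS value are illustrative sample data.

#include <stdint.h>
#include <stdio.h>

#define NUM_VLS 8

/* Mark a VL as unsupported when it has no MTU configured. */
static uint64_t unsupported_vl_mask(const unsigned int *mtu, int n)
{
	uint64_t mask = 0;

	for (int i = 0; i < n; i++)
		if (!mtu[i])
			mask |= 1ull << i;
	return mask;
}

int main(void)
{
	unsigned int mtu[NUM_VLS] = { 8192, 8192, 0, 0, 4096, 0, 0, 0 };

	printf("unsupported VLs: 0x%02llx\n",
	       (unsigned long long)unsupported_vl_mask(mtu, NUM_VLS));
	return 0;
}
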
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index 77697d6..018a415 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -956,7 +956,7 @@
 			if (ACCESS_ONCE(iovec->offset) == iovec->iov.iov_len) {
 				if (++req->iov_idx == req->data_iovs) {
 					ret = -EFAULT;
-					goto free_txreq;
+					goto free_tx;
 				}
 				iovec = &req->iovs[req->iov_idx];
 				WARN_ON(iovec->offset);
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 01a380e..14ddb75 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1511,12 +1511,18 @@
 	struct hfi1_pportdata *ppd;
 	struct hfi1_devdata *dd;
 	u8 sc5;
+	u8 sl;
 
 	/* test the mapping for validity */
 	ibp = to_iport(ibdev, ah_attr->port_num);
 	ppd = ppd_from_ibp(ibp);
-	sc5 = ibp->sl_to_sc[ah_attr->sl];
 	dd = dd_from_ppd(ppd);
+
+	sl = ah_attr->sl;
+	if (sl >= ARRAY_SIZE(ibp->sl_to_sc))
+		return -EINVAL;
+
+	sc5 = ibp->sl_to_sc[sl];
 	if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf)
 		return -EINVAL;
 	return 0;
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 463ea59..6463590 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -2639,7 +2639,7 @@
 {
 	struct srp_target_port *target = host_to_target(scmnd->device->host);
 	struct srp_rdma_ch *ch;
-	int i;
+	int i, j;
 	u8 status;
 
 	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
@@ -2653,8 +2653,8 @@
 
 	for (i = 0; i < target->ch_count; i++) {
 		ch = &target->ch[i];
-		for (i = 0; i < target->req_ring_size; ++i) {
-			struct srp_request *req = &ch->req_ring[i];
+		for (j = 0; j < target->req_ring_size; ++j) {
+			struct srp_request *req = &ch->req_ring[j];
 
 			srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
 		}
diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig
index 34ffa02..de41b16 100644
--- a/drivers/input/Kconfig
+++ b/drivers/input/Kconfig
@@ -214,6 +214,8 @@
 
 source "drivers/input/misc/Kconfig"
 
+source "drivers/input/sensors/smi130/Kconfig"
+
 source "drivers/input/rmi4/Kconfig"
 
 endif
diff --git a/drivers/input/Makefile b/drivers/input/Makefile
index d01a5b1..388ab27 100644
--- a/drivers/input/Makefile
+++ b/drivers/input/Makefile
@@ -33,4 +33,4 @@
 
 obj-$(CONFIG_INPUT_KEYRESET)	+= keyreset.o
 obj-$(CONFIG_INPUT_KEYCOMBO)	+= keycombo.o
-
+obj-y   += sensors/smi130/
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 4e77adb..c120afd 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1176,6 +1176,8 @@
 static const char * const middle_button_pnp_ids[] = {
 	"LEN2131", /* ThinkPad P52 w/ NFC */
 	"LEN2132", /* ThinkPad P52 */
+	"LEN2133", /* ThinkPad P72 w/ NFC */
+	"LEN2134", /* ThinkPad P72 */
 	NULL
 };
 
diff --git a/drivers/input/sensors/smi130/smi130_acc.c b/drivers/input/sensors/smi130/smi130_acc.c
index 4828b39..3979b65 100644
--- a/drivers/input/sensors/smi130/smi130_acc.c
+++ b/drivers/input/sensors/smi130/smi130_acc.c
@@ -154,7 +154,7 @@
 #include "bs_log.h"
 #define DRIVER_VERSION "0.0.53.0"
 #define ACC_NAME  "ACC"
-#define SMI_ACC2X2_ENABLE_INT1 1
+#define SMI_ACC2X2_ENABLE_INT2 1
 #define CONFIG_SMI_ACC_ENABLE_NEWDATA_INT 1
 
 #define SENSOR_NAME                 "smi130_acc"
@@ -6802,7 +6802,7 @@
 #endif
 
 	smi130_acc_get_interruptstatus1(smi130_acc->smi130_acc_client, &status);
-	PINFO("smi130_acc_irq_work_func, status = 0x%x\n", status);
+	PDEBUG("smi130_acc_irq_work_func, status = 0x%x\n", status);
 
 #ifdef CONFIG_SIG_MOTION
 	if (status & 0x04)	{
@@ -7404,64 +7404,6 @@
 	mutex_unlock(&data->enable_mutex);
 }
 
-#ifdef CONFIG_PM
-static int smi130_acc_suspend(struct i2c_client *client, pm_message_t mesg)
-{
-	struct smi130_acc_data *data = i2c_get_clientdata(client);
-
-	mutex_lock(&data->enable_mutex);
-	if (atomic_read(&data->enable) == 1) {
-		smi130_acc_set_mode(data->smi130_acc_client,
-			SMI_ACC2X2_MODE_SUSPEND, SMI_ACC_ENABLED_INPUT);
-#ifndef CONFIG_SMI_ACC_ENABLE_NEWDATA_INT
-		cancel_delayed_work_sync(&data->work);
-#endif
-	}
-	if (data->is_timer_running) {
-		hrtimer_cancel(&data->timer);
-		data->base_time = 0;
-		data->timestamp = 0;
-		data->fifo_time = 0;
-		data->acc_count = 0;
-	}
-	mutex_unlock(&data->enable_mutex);
-
-	return 0;
-}
-
-static int smi130_acc_resume(struct i2c_client *client)
-{
-	struct smi130_acc_data *data = i2c_get_clientdata(client);
-
-	mutex_lock(&data->enable_mutex);
-	if (atomic_read(&data->enable) == 1) {
-		smi130_acc_set_mode(data->smi130_acc_client,
-			SMI_ACC2X2_MODE_NORMAL, SMI_ACC_ENABLED_INPUT);
-#ifndef CONFIG_SMI_ACC_ENABLE_NEWDATA_INT
-		schedule_delayed_work(&data->work,
-				msecs_to_jiffies(atomic_read(&data->delay)));
-#endif
-	}
-	if (data->is_timer_running) {
-		hrtimer_start(&data->timer,
-					ns_to_ktime(data->time_odr),
-			HRTIMER_MODE_REL);
-		data->base_time = 0;
-		data->timestamp = 0;
-		data->is_timer_running = 1;
-	}
-	mutex_unlock(&data->enable_mutex);
-
-	return 0;
-}
-
-#else
-
-#define smi130_acc_suspend      NULL
-#define smi130_acc_resume       NULL
-
-#endif /* CONFIG_PM */
-
 static const struct i2c_device_id smi130_acc_id[] = {
 	{ SENSOR_NAME, 0 },
 	{ }
@@ -7480,8 +7422,6 @@
 		.name   = SENSOR_NAME,
 		.of_match_table = smi130_acc_of_match,
 	},
-	//.suspend    = smi130_acc_suspend,
-	//.resume     = smi130_acc_resume,
 	.id_table   = smi130_acc_id,
 	.probe      = smi130_acc_probe,
 	.remove     = smi130_acc_remove,
diff --git a/drivers/input/sensors/smi130/smi130_gyro_driver.c b/drivers/input/sensors/smi130/smi130_gyro_driver.c
index 65e303c..5ed9513 100644
--- a/drivers/input/sensors/smi130/smi130_gyro_driver.c
+++ b/drivers/input/sensors/smi130/smi130_gyro_driver.c
@@ -293,10 +293,9 @@
 static void smi_gyro_dump_reg(struct i2c_client *client);
 static int smi_gyro_check_chip_id(struct i2c_client *client);
 
-static int smi_gyro_pre_suspend(struct i2c_client *client);
-static int smi_gyro_post_resume(struct i2c_client *client);
-
 #ifdef CONFIG_HAS_EARLYSUSPEND
+static int smi_gyro_post_resume(struct i2c_client *client);
+static int smi_gyro_pre_suspend(struct i2c_client *client);
 static void smi_gyro_early_suspend(struct early_suspend *handler);
 static void smi_gyro_late_resume(struct early_suspend *handler);
 #endif
@@ -1812,6 +1811,7 @@
 	return err;
 }
 
+#ifdef CONFIG_HAS_EARLYSUSPEND
 static int smi_gyro_pre_suspend(struct i2c_client *client)
 {
 	int err = 0;
@@ -1861,7 +1861,6 @@
 	return err;
 }
 
-#ifdef CONFIG_HAS_EARLYSUSPEND
 static void smi_gyro_early_suspend(struct early_suspend *handler)
 {
 	int err = 0;
@@ -1902,45 +1901,6 @@
 
 	mutex_unlock(&client_data->mutex_op_mode);
 }
-#else
-static int smi_gyro_suspend(struct i2c_client *client, pm_message_t mesg)
-{
-	int err = 0;
-	struct smi_gyro_client_data *client_data =
-		(struct smi_gyro_client_data *)i2c_get_clientdata(client);
-
-	PINFO("function entrance");
-
-	mutex_lock(&client_data->mutex_op_mode);
-	if (client_data->enable) {
-		err = smi_gyro_pre_suspend(client);
-		err = SMI_GYRO_CALL_API(set_mode)(
-				SMI_GYRO_VAL_NAME(MODE_SUSPEND));
-	}
-	mutex_unlock(&client_data->mutex_op_mode);
-	return err;
-}
-
-static int smi_gyro_resume(struct i2c_client *client)
-{
-
-	int err = 0;
-	struct smi_gyro_client_data *client_data =
-		(struct smi_gyro_client_data *)i2c_get_clientdata(client);
-
-	PINFO("function entrance");
-
-	mutex_lock(&client_data->mutex_op_mode);
-
-	if (client_data->enable)
-		err = SMI_GYRO_CALL_API(set_mode)(SMI_GYRO_VAL_NAME(MODE_NORMAL));
-
-	/* post resume operation */
-	smi_gyro_post_resume(client);
-
-	mutex_unlock(&client_data->mutex_op_mode);
-	return err;
-}
 #endif
 
 void smi_gyro_shutdown(struct i2c_client *client)
@@ -2012,10 +1972,6 @@
 	.probe = smi_gyro_probe,
 	.remove = smi_gyro_remove,
 	.shutdown = smi_gyro_shutdown,
-#ifndef CONFIG_HAS_EARLYSUSPEND
-	//.suspend = smi_gyro_suspend,
-	//.resume = smi_gyro_resume,
-#endif
 };
 
 static int __init SMI_GYRO_init(void)
diff --git a/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_fw_update.c b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_fw_update.c
index 0ba4faa..4db9da1 100644
--- a/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_fw_update.c
+++ b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_fw_update.c
@@ -1951,7 +1951,7 @@
 			return retval;
 		}
 
-		retval = fwu_wait_for_idle(WRITE_WAIT_MS, false);
+		retval = fwu_wait_for_idle(WRITE_WAIT_MS, true);
 		if (retval < 0) {
 			dev_err(rmi4_data->pdev->dev.parent,
 					"%s: Failed to wait for idle status (%d blocks remaining)\n",
@@ -1961,6 +1961,8 @@
 
 		block_ptr += (transfer * fwu->block_size);
 		remaining -= transfer;
+		dev_dbg(rmi4_data->pdev->dev.parent, "%s: remaining %d\n",
+					__func__, remaining);
 	} while (remaining);
 
 	return 0;
@@ -2010,7 +2012,7 @@
 			return retval;
 		}
 
-		retval = fwu_wait_for_idle(WRITE_WAIT_MS, false);
+		retval = fwu_wait_for_idle(WRITE_WAIT_MS, true);
 		if (retval < 0) {
 			dev_err(rmi4_data->pdev->dev.parent,
 					"%s: Failed to wait for idle status (block %d)\n",
@@ -2019,6 +2021,8 @@
 		}
 
 		block_ptr += fwu->block_size;
+		dev_dbg(rmi4_data->pdev->dev.parent, "%s: remaining %d\n",
+					__func__, block_cnt - blk);
 	}
 
 	return 0;
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 0c910a8..16199b3 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -2452,9 +2452,9 @@
 	}
 
 	if (amd_iommu_unmap_flush) {
-		dma_ops_free_iova(dma_dom, dma_addr, pages);
 		domain_flush_tlb(&dma_dom->domain);
 		domain_flush_complete(&dma_dom->domain);
+		dma_ops_free_iova(dma_dom, dma_addr, pages);
 	} else {
 		queue_add(dma_dom, dma_addr, pages);
 	}
diff --git a/drivers/leds/leds-qpnp-vibrator.c b/drivers/leds/leds-qpnp-vibrator.c
index cc2615d..81f54f0 100644
--- a/drivers/leds/leds-qpnp-vibrator.c
+++ b/drivers/leds/leds-qpnp-vibrator.c
@@ -391,7 +391,6 @@
 				 (vib->vib_play_ms % 1000) * 1000000),
 					HRTIMER_MODE_REL);
 	}
-	vib->vib_play_ms = 0;
 	mutex_unlock(&vib->lock);
 	schedule_work(&vib->work);
 
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index a184c98..62eb4b7 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -1262,8 +1262,8 @@
 		if (hints_valid) {
 			r = dm_array_cursor_next(&cmd->hint_cursor);
 			if (r) {
-				DMERR("dm_array_cursor_next for hint failed");
-				goto out;
+				dm_array_cursor_end(&cmd->hint_cursor);
+				hints_valid = false;
 			}
 		}
 	}
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index bed056c..f3993a4 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -3390,8 +3390,13 @@
 
 static bool can_resize(struct cache *cache, dm_cblock_t new_size)
 {
-	if (from_cblock(new_size) > from_cblock(cache->cache_size))
-		return true;
+	if (from_cblock(new_size) > from_cblock(cache->cache_size)) {
+		if (cache->sized) {
+			DMERR("%s: unable to extend cache due to missing cache table reload",
+			      cache_device_name(cache));
+			return false;
+		}
+	}
 
 	/*
 	 * We can't drop a dirty block when shrinking the cache.
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index ee75e35..3f389b2 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -2880,6 +2880,11 @@
 		set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
 		rs_set_new(rs);
 	} else if (rs_is_recovering(rs)) {
+		/* Rebuild particular devices */
+		if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) {
+			set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
+			rs_setup_recovery(rs, MaxSector);
+		}
 		/* A recovering raid set may be resized */
 		; /* skip setup rs */
 	} else if (rs_is_reshaping(rs)) {
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index e976f4f..149fbac 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -190,6 +190,12 @@
 	sector_t data_block_size;
 
 	/*
+	 * We reserve a section of the metadata for commit overhead.
+	 * All reported space does *not* include this.
+	 */
+	dm_block_t metadata_reserve;
+
+	/*
 	 * Set if a transaction has to be aborted but the attempt to roll back
 	 * to the previous (good) transaction failed.  The only pool metadata
 	 * operation possible in this state is the closing of the device.
@@ -827,6 +833,20 @@
 	return dm_tm_commit(pmd->tm, sblock);
 }
 
+static void __set_metadata_reserve(struct dm_pool_metadata *pmd)
+{
+	int r;
+	dm_block_t total;
+	dm_block_t max_blocks = 4096; /* 16M */
+
+	r = dm_sm_get_nr_blocks(pmd->metadata_sm, &total);
+	if (r) {
+		DMERR("could not get size of metadata device");
+		pmd->metadata_reserve = max_blocks;
+	} else
+		pmd->metadata_reserve = min(max_blocks, div_u64(total, 10));
+}
+
 struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
 					       sector_t data_block_size,
 					       bool format_device)
@@ -860,6 +880,8 @@
 		return ERR_PTR(r);
 	}
 
+	__set_metadata_reserve(pmd);
+
 	return pmd;
 }
 
@@ -1831,6 +1853,13 @@
 	down_read(&pmd->root_lock);
 	if (!pmd->fail_io)
 		r = dm_sm_get_nr_free(pmd->metadata_sm, result);
+
+	if (!r) {
+		if (*result < pmd->metadata_reserve)
+			*result = 0;
+		else
+			*result -= pmd->metadata_reserve;
+	}
 	up_read(&pmd->root_lock);
 
 	return r;
@@ -1943,8 +1972,11 @@
 	int r = -EINVAL;
 
 	down_write(&pmd->root_lock);
-	if (!pmd->fail_io)
+	if (!pmd->fail_io) {
 		r = __resize_space_map(pmd->metadata_sm, new_count);
+		if (!r)
+			__set_metadata_reserve(pmd);
+	}
 	up_write(&pmd->root_lock);
 
 	return r;
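
The dm-thin-metadata hunks above hold back min(4096 blocks, 10% of the metadata device) as a commit reserve and clamp the reported free count so the reserve is never advertised. A small C sketch of those two calculations follows; the block counts in main are made-up sample values.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t block_t;

/* Hold back min(4096 blocks, 10% of the metadata device) for commit
 * overhead. */
static block_t metadata_reserve(block_t total_blocks)
{
	block_t max_blocks = 4096;	/* roughly 16M of 4K blocks */
	block_t tenth = total_blocks / 10;

	return tenth < max_blocks ? tenth : max_blocks;
}

/* Never report the reserved slice as free space. */
static block_t reported_free(block_t actual_free, block_t reserve)
{
	return actual_free < reserve ? 0 : actual_free - reserve;
}

int main(void)
{
	block_t total = 20480, reserve = metadata_reserve(total);

	printf("reserve=%llu free(5000)=%llu free(1000)=%llu\n",
	       (unsigned long long)reserve,
	       (unsigned long long)reported_free(5000, reserve),
	       (unsigned long long)reported_free(1000, reserve));
	return 0;
}
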
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 5a03e15..e697283 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -200,7 +200,13 @@
 enum pool_mode {
 	PM_WRITE,		/* metadata may be changed */
 	PM_OUT_OF_DATA_SPACE,	/* metadata may be changed, though data may not be allocated */
+
+	/*
+	 * Like READ_ONLY, except may switch back to WRITE on metadata
+	 * resize. Reported as READ_ONLY.
+	 */
+	PM_OUT_OF_METADATA_SPACE,
 	PM_READ_ONLY,		/* metadata may not be changed */
+
 	PM_FAIL,		/* all I/O fails */
 };
 
@@ -1386,7 +1392,35 @@
 
 static void requeue_bios(struct pool *pool);
 
-static void check_for_space(struct pool *pool)
+static bool is_read_only_pool_mode(enum pool_mode mode)
+{
+	return (mode == PM_OUT_OF_METADATA_SPACE || mode == PM_READ_ONLY);
+}
+
+static bool is_read_only(struct pool *pool)
+{
+	return is_read_only_pool_mode(get_pool_mode(pool));
+}
+
+static void check_for_metadata_space(struct pool *pool)
+{
+	int r;
+	const char *ooms_reason = NULL;
+	dm_block_t nr_free;
+
+	r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free);
+	if (r)
+		ooms_reason = "Could not get free metadata blocks";
+	else if (!nr_free)
+		ooms_reason = "No free metadata blocks";
+
+	if (ooms_reason && !is_read_only(pool)) {
+		DMERR("%s", ooms_reason);
+		set_pool_mode(pool, PM_OUT_OF_METADATA_SPACE);
+	}
+}
+
+static void check_for_data_space(struct pool *pool)
 {
 	int r;
 	dm_block_t nr_free;
@@ -1412,14 +1446,16 @@
 {
 	int r;
 
-	if (get_pool_mode(pool) >= PM_READ_ONLY)
+	if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE)
 		return -EINVAL;
 
 	r = dm_pool_commit_metadata(pool->pmd);
 	if (r)
 		metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
-	else
-		check_for_space(pool);
+	else {
+		check_for_metadata_space(pool);
+		check_for_data_space(pool);
+	}
 
 	return r;
 }
@@ -1485,6 +1521,19 @@
 		return r;
 	}
 
+	r = dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks);
+	if (r) {
+		metadata_operation_failed(pool, "dm_pool_get_free_metadata_block_count", r);
+		return r;
+	}
+
+	if (!free_blocks) {
+		/* Let's commit before we use up the metadata reserve. */
+		r = commit(pool);
+		if (r)
+			return r;
+	}
+
 	return 0;
 }
 
@@ -1516,6 +1565,7 @@
 	case PM_OUT_OF_DATA_SPACE:
 		return pool->pf.error_if_no_space ? -ENOSPC : 0;
 
+	case PM_OUT_OF_METADATA_SPACE:
 	case PM_READ_ONLY:
 	case PM_FAIL:
 		return -EIO;
@@ -2479,8 +2529,9 @@
 		error_retry_list(pool);
 		break;
 
+	case PM_OUT_OF_METADATA_SPACE:
 	case PM_READ_ONLY:
-		if (old_mode != new_mode)
+		if (!is_read_only_pool_mode(old_mode))
 			notify_of_pool_mode_change(pool, "read-only");
 		dm_pool_metadata_read_only(pool->pmd);
 		pool->process_bio = process_bio_read_only;
@@ -3418,6 +3469,10 @@
 		DMINFO("%s: growing the metadata device from %llu to %llu blocks",
 		       dm_device_name(pool->pool_md),
 		       sb_metadata_dev_size, metadata_dev_size);
+
+		if (get_pool_mode(pool) == PM_OUT_OF_METADATA_SPACE)
+			set_pool_mode(pool, PM_WRITE);
+
 		r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
 		if (r) {
 			metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r);
@@ -3721,7 +3776,7 @@
 	struct pool_c *pt = ti->private;
 	struct pool *pool = pt->pool;
 
-	if (get_pool_mode(pool) >= PM_READ_ONLY) {
+	if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE) {
 		DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode",
 		      dm_device_name(pool->pool_md));
 		return -EOPNOTSUPP;
@@ -3795,6 +3850,7 @@
 	dm_block_t nr_blocks_data;
 	dm_block_t nr_blocks_metadata;
 	dm_block_t held_root;
+	enum pool_mode mode;
 	char buf[BDEVNAME_SIZE];
 	char buf2[BDEVNAME_SIZE];
 	struct pool_c *pt = ti->private;
@@ -3865,9 +3921,10 @@
 		else
 			DMEMIT("- ");
 
-		if (pool->pf.mode == PM_OUT_OF_DATA_SPACE)
+		mode = get_pool_mode(pool);
+		if (mode == PM_OUT_OF_DATA_SPACE)
 			DMEMIT("out_of_data_space ");
-		else if (pool->pf.mode == PM_READ_ONLY)
+		else if (is_read_only_pool_mode(mode))
 			DMEMIT("ro ");
 		else
 			DMEMIT("rw ");
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index fcc2b57..e870b09 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -302,15 +302,6 @@
 	while (cinfo->recovery_map) {
 		slot = fls64((u64)cinfo->recovery_map) - 1;
 
-		/* Clear suspend_area associated with the bitmap */
-		spin_lock_irq(&cinfo->suspend_lock);
-		list_for_each_entry_safe(s, tmp, &cinfo->suspend_list, list)
-			if (slot == s->slot) {
-				list_del(&s->list);
-				kfree(s);
-			}
-		spin_unlock_irq(&cinfo->suspend_lock);
-
 		snprintf(str, 64, "bitmap%04d", slot);
 		bm_lockres = lockres_init(mddev, str, NULL, 1);
 		if (!bm_lockres) {
@@ -329,6 +320,16 @@
 			pr_err("md-cluster: Could not copy data from bitmap %d\n", slot);
 			goto clear_bit;
 		}
+
+		/* Clear suspend_area associated with the bitmap */
+		spin_lock_irq(&cinfo->suspend_lock);
+		list_for_each_entry_safe(s, tmp, &cinfo->suspend_list, list)
+			if (slot == s->slot) {
+				list_del(&s->list);
+				kfree(s);
+			}
+		spin_unlock_irq(&cinfo->suspend_lock);
+
 		if (hi > 0) {
 			if (lo < mddev->recovery_cp)
 				mddev->recovery_cp = lo;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 8fd2ac3..0e52852 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -4381,11 +4381,12 @@
 		allow_barrier(conf);
 	}
 
+	raise_barrier(conf, 0);
 read_more:
 	/* Now schedule reads for blocks from sector_nr to last */
 	r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
 	r10_bio->state = 0;
-	raise_barrier(conf, sectors_done != 0);
+	raise_barrier(conf, 1);
 	atomic_set(&r10_bio->remaining, 0);
 	r10_bio->mddev = mddev;
 	r10_bio->sector = sector_nr;
@@ -4492,6 +4493,8 @@
 	if (sector_nr <= last)
 		goto read_more;
 
+	lower_barrier(conf);
+
 	/* Now that we have done the whole section we can
 	 * update reshape_progress
 	 */
diff --git a/drivers/media/i2c/soc_camera/ov772x.c b/drivers/media/i2c/soc_camera/ov772x.c
index 7e68762..fa1cb24 100644
--- a/drivers/media/i2c/soc_camera/ov772x.c
+++ b/drivers/media/i2c/soc_camera/ov772x.c
@@ -834,7 +834,7 @@
 	 * set COM8
 	 */
 	if (priv->band_filter) {
-		ret = ov772x_mask_set(client, COM8, BNDF_ON_OFF, 1);
+		ret = ov772x_mask_set(client, COM8, BNDF_ON_OFF, BNDF_ON_OFF);
 		if (!ret)
 			ret = ov772x_mask_set(client, BDBASE,
 					      0xff, 256 - priv->band_filter);
diff --git a/drivers/media/platform/exynos4-is/fimc-isp-video.c b/drivers/media/platform/exynos4-is/fimc-isp-video.c
index 400ce0c..e00fa03 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp-video.c
+++ b/drivers/media/platform/exynos4-is/fimc-isp-video.c
@@ -384,12 +384,17 @@
 				struct v4l2_pix_format_mplane *pixm,
 				const struct fimc_fmt **fmt)
 {
-	*fmt = fimc_isp_find_format(&pixm->pixelformat, NULL, 2);
+	const struct fimc_fmt *__fmt;
+
+	__fmt = fimc_isp_find_format(&pixm->pixelformat, NULL, 2);
+
+	if (fmt)
+		*fmt = __fmt;
 
 	pixm->colorspace = V4L2_COLORSPACE_SRGB;
 	pixm->field = V4L2_FIELD_NONE;
-	pixm->num_planes = (*fmt)->memplanes;
-	pixm->pixelformat = (*fmt)->fourcc;
+	pixm->num_planes = __fmt->memplanes;
+	pixm->pixelformat = __fmt->fourcc;
 	/*
 	 * TODO: double check with the documentation that these width/height
 	 * constraints are correct.
diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c
index ae8c6b3..7f0ed5a 100644
--- a/drivers/media/platform/fsl-viu.c
+++ b/drivers/media/platform/fsl-viu.c
@@ -1417,7 +1417,7 @@
 				     sizeof(struct viu_reg), DRV_NAME)) {
 		dev_err(&op->dev, "Error while requesting mem region\n");
 		ret = -EBUSY;
-		goto err;
+		goto err_irq;
 	}
 
 	/* remap registers */
@@ -1425,7 +1425,7 @@
 	if (!viu_regs) {
 		dev_err(&op->dev, "Can't map register set\n");
 		ret = -ENOMEM;
-		goto err;
+		goto err_irq;
 	}
 
 	/* Prepare our private structure */
@@ -1433,7 +1433,7 @@
 	if (!viu_dev) {
 		dev_err(&op->dev, "Can't allocate private structure\n");
 		ret = -ENOMEM;
-		goto err;
+		goto err_irq;
 	}
 
 	viu_dev->vr = viu_regs;
@@ -1449,16 +1449,21 @@
 	ret = v4l2_device_register(viu_dev->dev, &viu_dev->v4l2_dev);
 	if (ret < 0) {
 		dev_err(&op->dev, "v4l2_device_register() failed: %d\n", ret);
-		goto err;
+		goto err_irq;
 	}
 
 	ad = i2c_get_adapter(0);
+	if (!ad) {
+		ret = -EFAULT;
+		dev_err(&op->dev, "couldn't get i2c adapter\n");
+		goto err_v4l2;
+	}
 
 	v4l2_ctrl_handler_init(&viu_dev->hdl, 5);
 	if (viu_dev->hdl.error) {
 		ret = viu_dev->hdl.error;
 		dev_err(&op->dev, "couldn't register control\n");
-		goto err_vdev;
+		goto err_i2c;
 	}
 	/* This control handler will inherit the control(s) from the
 	   sub-device(s). */
@@ -1476,7 +1481,7 @@
 	vdev = video_device_alloc();
 	if (vdev == NULL) {
 		ret = -ENOMEM;
-		goto err_vdev;
+		goto err_hdl;
 	}
 
 	*vdev = viu_template;
@@ -1497,7 +1502,7 @@
 	ret = video_register_device(viu_dev->vdev, VFL_TYPE_GRABBER, -1);
 	if (ret < 0) {
 		video_device_release(viu_dev->vdev);
-		goto err_vdev;
+		goto err_unlock;
 	}
 
 	/* enable VIU clock */
@@ -1505,12 +1510,12 @@
 	if (IS_ERR(clk)) {
 		dev_err(&op->dev, "failed to lookup the clock!\n");
 		ret = PTR_ERR(clk);
-		goto err_clk;
+		goto err_vdev;
 	}
 	ret = clk_prepare_enable(clk);
 	if (ret) {
 		dev_err(&op->dev, "failed to enable the clock!\n");
-		goto err_clk;
+		goto err_vdev;
 	}
 	viu_dev->clk = clk;
 
@@ -1521,7 +1526,7 @@
 	if (request_irq(viu_dev->irq, viu_intr, 0, "viu", (void *)viu_dev)) {
 		dev_err(&op->dev, "Request VIU IRQ failed.\n");
 		ret = -ENODEV;
-		goto err_irq;
+		goto err_clk;
 	}
 
 	mutex_unlock(&viu_dev->lock);
@@ -1529,16 +1534,19 @@
 	dev_info(&op->dev, "Freescale VIU Video Capture Board\n");
 	return ret;
 
-err_irq:
-	clk_disable_unprepare(viu_dev->clk);
 err_clk:
-	video_unregister_device(viu_dev->vdev);
+	clk_disable_unprepare(viu_dev->clk);
 err_vdev:
-	v4l2_ctrl_handler_free(&viu_dev->hdl);
+	video_unregister_device(viu_dev->vdev);
+err_unlock:
 	mutex_unlock(&viu_dev->lock);
+err_hdl:
+	v4l2_ctrl_handler_free(&viu_dev->hdl);
+err_i2c:
 	i2c_put_adapter(ad);
+err_v4l2:
 	v4l2_device_unregister(&viu_dev->v4l2_dev);
-err:
+err_irq:
 	irq_dispose_mapping(viu_irq);
 	return ret;
 }
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
index aaa172d..e6e9ace 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
@@ -1219,7 +1219,7 @@
 		ctx_isp->substate_activated);
 	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
 
-	if (ctx_isp->active_req_cnt >=  2) {
+	if (ctx_isp->active_req_cnt >=  4) {
 		CAM_ERR_RATE_LIMIT(CAM_ISP,
 			"Reject apply request (id %lld) due to congestion(cnt = %d)",
 			req->request_id,
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_10_0_0_hwreg.h b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_10_0_0_hwreg.h
index 4c5ce02..bcd8a9e 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_10_0_0_hwreg.h
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_10_0_0_hwreg.h
@@ -28,6 +28,9 @@
 #define mask_enable_clk_B       0x2
 #define mask_ctrl_1_A           0x5
 #define mask_ctrl_1_B           0xA
+#define mask_reset_A            0x1
+#define mask_reset_B            0x7
+#define mask_shutdown_A         0x3
 #define mask_hs_freq_range      0x7F
 #define mask_osc_freq_2         0xFF
 #define mask_osc_freq_3         0xF00
@@ -56,8 +59,6 @@
 	{0x58C, 0xFF},   /* mipi_csiphy_irq_mask_ctrl_lane_0 */
 	{0x5C8, 0xFF},   /* mipi_csiphy_irq_mask_ctrl_lane_clk_0 */
 	{0x20, 0x0},     /* mipi_csiphy_rx_sys_7_00 */
-	{0x28, 0x43},    /* mipi_csiphy_rx_sys_9_00 */
-	{0x380, 0x0},    /* mipi_csiphy_rx_startup_ovr_0_00 */
 	{0x384, 0x0},    /* mipi_csiphy_rx_startup_ovr_1_00 */
 	{0x388, 0xCC},   /* mipi_csiphy_rx_startup_ovr_2_00 */
 	{0x38C, 0x1},    /* mipi_csiphy_rx_startup_ovr_3_00 */
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c
index 1f359bb..f98e23f 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c
@@ -252,10 +252,13 @@
 		csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg.
 		mipi_csiphy_rx_sys_7_00.addr + offset);
 
-	msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_snps_reg.
-		mipi_csiphy_rx_sys_9_00.data,
+	value = msm_camera_io_r(csiphybase +
+		csiphy_dev->ctrl_reg->csiphy_snps_reg.
+		mipi_csiphy_rx_clk_lane_6_00.addr + offset);
+	value |= SET_THE_BIT(7);
+	msm_camera_io_w(value,
 		csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg.
-		mipi_csiphy_rx_sys_9_00.addr + offset);
+		mipi_csiphy_rx_clk_lane_6_00.addr + offset);
 
 	msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_snps_reg.
 		mipi_csiphy_rx_startup_ovr_4_00.data,
@@ -317,7 +320,7 @@
 	uint16_t lane_mask = 0;
 	void __iomem *csiphybase;
 	enum snps_csiphy_mode mode = INVALID_MODE;
-	uint32_t value, num_tries, num_lanes, offset;
+	uint32_t value, num_tries, num_lanes, offset = SNPS_INTERPHY_OFFSET;
 	uint32_t clk_mux_reg = 0;
 
 	csiphybase = csiphy_dev->base;
@@ -497,17 +500,6 @@
 
 		value = msm_camera_io_r(csiphybase +
 			csiphy_dev->ctrl_reg->csiphy_snps_reg.
-			mipi_csiphy_rx_startup_ovr_0_00.addr +
-			SNPS_INTERPHY_OFFSET);
-		value |= SET_THE_BIT(0);
-		value |= SET_THE_BIT(1);
-		msm_camera_io_w(value,
-			csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg.
-			mipi_csiphy_rx_startup_ovr_0_00.addr +
-			SNPS_INTERPHY_OFFSET);
-
-		value = msm_camera_io_r(csiphybase +
-			csiphy_dev->ctrl_reg->csiphy_snps_reg.
 			mipi_csiphy_rx_startup_ovr_1_00.addr +
 			SNPS_INTERPHY_OFFSET);
 		value &= ~(SET_THE_BIT(0));
@@ -521,6 +513,7 @@
 			csiphy_dev->ctrl_reg->csiphy_snps_reg.
 			mipi_csiphy_rx_clk_lane_6_00.addr);
 		value |= SET_THE_BIT(2);
+		value &= ~(SET_THE_BIT(7));
 		msm_camera_io_w(value,
 			csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg.
 			mipi_csiphy_rx_clk_lane_6_00.addr);
@@ -530,7 +523,7 @@
 			mipi_csiphy_rx_clk_lane_6_00.addr +
 			SNPS_INTERPHY_OFFSET);
 		value |= SET_THE_BIT(3);
-		value |= SET_THE_BIT(7);
+		value &= ~(SET_THE_BIT(7));
 		value &= ~(SET_THE_BIT(2));
 		msm_camera_io_w(value,
 			csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg.
@@ -592,36 +585,109 @@
 		csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg.
 		mipi_csiphy_enable_clk.addr);
 
-	value = 0x0;
-	if (mode == AGGREGATE_MODE || mode == TWO_LANE_PHY_A)
-		value |= mask_ctrl_1_A;
-	if (mode == AGGREGATE_MODE || mode == TWO_LANE_PHY_B)
-		value |= mask_ctrl_1_B;
-	msm_camera_io_w(value,
+	if (mode == TWO_LANE_PHY_A) {
+		msm_camera_io_w(mask_reset_A,
 		csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg.
 		mipi_csiphy_ctrl_1.addr);
 
-	if (mode == AGGREGATE_MODE || mode == TWO_LANE_PHY_A)
-		offset = 0x0;
-	else
-		offset = SNPS_INTERPHY_OFFSET;
+		msm_camera_io_w(mask_ctrl_1_A,
+		csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg.
+		mipi_csiphy_ctrl_1.addr);
 
-	value = 0x0;
-	num_tries = 0;
+		value = 0x0;
+		num_tries = 0;
 
-	do {
-		num_tries++;
-		value = msm_camera_io_r(csiphybase +
-			csiphy_dev->ctrl_reg->csiphy_snps_reg.
-			mipi_csiphy_rx_startup_obs_2_00.addr + offset);
-		if ((value | SET_THE_BIT(4)) == value)
-			break;
-		usleep_range(100, 150);
-	} while (num_tries < 6);
+		do {
+			num_tries++;
+			value = msm_camera_io_r(csiphybase +
+				csiphy_dev->ctrl_reg->csiphy_snps_reg.
+				mipi_csiphy_rx_startup_obs_2_00.addr);
+			if ((value | SET_THE_BIT(4)) == value)
+				break;
+			usleep_range(100, 150);
+		} while (num_tries < 6);
+		if ((value | SET_THE_BIT(4)) != value) {
+			pr_err("%s: SNPS phy config failed\n", __func__);
+			return -EINVAL;
+		}
+	}
 
-	if ((value | SET_THE_BIT(4)) != value) {
-		pr_err("%s: SNPS phy config failed\n", __func__);
-		return -EINVAL;
+	if (mode == TWO_LANE_PHY_B) {
+		msm_camera_io_w(mask_reset_B,
+		csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg.
+		mipi_csiphy_ctrl_1.addr);
+
+		msm_camera_io_w(mask_ctrl_1_A|mask_ctrl_1_B,
+		csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg.
+		mipi_csiphy_ctrl_1.addr);
+
+		value = 0x0;
+		num_tries = 0;
+
+		do {
+			num_tries++;
+			value = msm_camera_io_r(csiphybase +
+				csiphy_dev->ctrl_reg->csiphy_snps_reg.
+				mipi_csiphy_rx_startup_obs_2_00.addr + offset);
+			if ((value | SET_THE_BIT(4)) == value)
+				break;
+			usleep_range(100, 150);
+		} while (num_tries < 6);
+
+		if ((value | SET_THE_BIT(4)) != value) {
+			pr_err("%s: SNPS phy config failed\n", __func__);
+			return -EINVAL;
+		}
+	}
+
+	if (mode == AGGREGATE_MODE) {
+		msm_camera_io_w(mask_shutdown_A,
+		csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg.
+		mipi_csiphy_ctrl_1.addr);
+
+		msm_camera_io_w(mask_reset_B,
+		csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg.
+		mipi_csiphy_ctrl_1.addr);
+
+		value = 0x0;
+		num_tries = 0;
+
+		do {
+			num_tries++;
+			value = msm_camera_io_r(csiphybase +
+				csiphy_dev->ctrl_reg->csiphy_snps_reg.
+				mipi_csiphy_rx_startup_obs_2_00.addr);
+			if ((value | SET_THE_BIT(4)) == value)
+				break;
+			usleep_range(100, 150);
+		} while (num_tries < 6);
+
+		if ((value | SET_THE_BIT(4)) != value) {
+			pr_err("%s: SNPS phy config failed\n", __func__);
+			return -EINVAL;
+		}
+
+		msm_camera_io_w(mask_ctrl_1_A|mask_ctrl_1_B,
+		csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg.
+		mipi_csiphy_ctrl_1.addr);
+
+		value = 0x0;
+		num_tries = 0;
+
+		do {
+			num_tries++;
+			value = msm_camera_io_r(csiphybase +
+				csiphy_dev->ctrl_reg->csiphy_snps_reg.
+				mipi_csiphy_rx_startup_obs_2_00.addr + offset);
+			if ((value | SET_THE_BIT(4)) == value)
+				break;
+			usleep_range(100, 150);
+		} while (num_tries < 6);
+
+		if ((value | SET_THE_BIT(4)) != value) {
+			pr_err("%s: SNPS phy config failed\n", __func__);
+			return -EINVAL;
+		}
 	}
 
 	msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_snps_reg.
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h
index 6fc084c..dade2e3 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h
@@ -83,8 +83,6 @@
 	struct csiphy_reg_t mipi_csiphy_irq_mask_ctrl_lane_0;
 	struct csiphy_reg_t mipi_csiphy_irq_mask_ctrl_lane_clk_0;
 	struct csiphy_reg_t mipi_csiphy_rx_sys_7_00;
-	struct csiphy_reg_t mipi_csiphy_rx_sys_9_00;
-	struct csiphy_reg_t mipi_csiphy_rx_startup_ovr_0_00;
 	struct csiphy_reg_t mipi_csiphy_rx_startup_ovr_1_00;
 	struct csiphy_reg_t mipi_csiphy_rx_startup_ovr_2_00;
 	struct csiphy_reg_t mipi_csiphy_rx_startup_ovr_3_00;
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index e6a4ed30..148df7d 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -1393,7 +1393,6 @@
 	struct hal_h264_entropy_control h264_entropy_control;
 	struct hal_intra_period intra_period;
 	struct hal_idr_period idr_period;
-	struct hal_vpe_rotation vpe_rotation;
 	struct hal_intra_refresh intra_refresh;
 	struct hal_multi_slice_control multi_slice_control;
 	struct hal_h264_db_control h264_db_control;
@@ -1692,28 +1691,12 @@
 		break;
 	case V4L2_CID_MPEG_VIDC_VIDEO_ROTATION:
 	{
-		temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_FLIP);
-		property_id = HAL_PARAM_VPE_ROTATION;
-		vpe_rotation.rotate = msm_comm_v4l2_to_hal(
-				V4L2_CID_MPEG_VIDC_VIDEO_ROTATION,
-				ctrl->val);
-		vpe_rotation.flip = msm_comm_v4l2_to_hal(
-				V4L2_CID_MPEG_VIDC_VIDEO_FLIP,
-				temp_ctrl->val);
-		pdata = &vpe_rotation;
+		dprintk(VIDC_DBG, "Rotation %d\n", ctrl->val);
 		break;
 	}
 	case V4L2_CID_MPEG_VIDC_VIDEO_FLIP:
 	{
-		temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_ROTATION);
-		property_id = HAL_PARAM_VPE_ROTATION;
-		vpe_rotation.rotate = msm_comm_v4l2_to_hal(
-				V4L2_CID_MPEG_VIDC_VIDEO_ROTATION,
-				temp_ctrl->val);
-		vpe_rotation.flip = msm_comm_v4l2_to_hal(
-				V4L2_CID_MPEG_VIDC_VIDEO_FLIP,
-				ctrl->val);
-		pdata = &vpe_rotation;
+		dprintk(VIDC_DBG, "Flip %d\n", ctrl->val);
 		break;
 	}
 	case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: {
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 815d137..18bd77f 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -946,6 +946,59 @@
 	return 0;
 }
 
+static int msm_vidc_set_rotation(struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+	int value = 0;
+	struct hfi_device *hdev;
+	struct hal_vpe_rotation vpe_rotation;
+	struct hal_frame_size frame_sz;
+
+	hdev = inst->core->device;
+
+	/* Set rotation and flip first */
+	value = msm_comm_g_ctrl_for_id(inst, V4L2_CID_MPEG_VIDC_VIDEO_ROTATION);
+	if (value < 0) {
+		dprintk(VIDC_ERR, "Get control for rotation failed\n");
+		return value;
+	}
+	vpe_rotation.rotate = value;
+	value = msm_comm_g_ctrl_for_id(inst, V4L2_CID_MPEG_VIDC_VIDEO_FLIP);
+	if (value < 0) {
+		dprintk(VIDC_ERR, "Get control for flip failed\n");
+		return value;
+	}
+	vpe_rotation.flip = value;
+	dprintk(VIDC_DBG, "Set rotation = %d, flip = %d for capture port.\n",
+		vpe_rotation.rotate, vpe_rotation.flip);
+	rc = call_hfi_op(hdev, session_set_property,
+		(void *)inst->session,
+		HAL_PARAM_VPE_ROTATION, &vpe_rotation);
+	if (rc) {
+		dprintk(VIDC_ERR, "Set rotation/flip at start stream failed\n");
+		return rc;
+	}
+
+	/* flip the output resolution if required */
+	value = vpe_rotation.rotate;
+	if (value == V4L2_CID_MPEG_VIDC_VIDEO_ROTATION_90 ||
+			value == V4L2_CID_MPEG_VIDC_VIDEO_ROTATION_270) {
+		frame_sz.buffer_type = HAL_BUFFER_OUTPUT;
+		frame_sz.width = inst->prop.height[CAPTURE_PORT];
+		frame_sz.height = inst->prop.width[CAPTURE_PORT];
+		dprintk(VIDC_DBG, "CAPTURE port width = %d, height = %d\n",
+			frame_sz.width, frame_sz.height);
+		rc = call_hfi_op(hdev, session_set_property, (void *)
+			inst->session, HAL_PARAM_FRAME_SIZE, &frame_sz);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"Failed to set framesize for CAPTURE port\n");
+			return rc;
+		}
+	}
+	return rc;
+}
+
 static inline int start_streaming(struct msm_vidc_inst *inst)
 {
 	int rc = 0;
@@ -954,6 +1007,15 @@
 
 	hdev = inst->core->device;
 
+	if (inst->session_type == MSM_VIDC_ENCODER) {
+		rc = msm_vidc_set_rotation(inst);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"Set rotation for encoder failed\n");
+			goto fail_start;
+		}
+	}
+
 	/* Create tile info table */
 	rc = msm_vidc_create_tile_info_table(inst);
 	if (rc) {
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index 176b9c6..4017492 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -1645,12 +1645,12 @@
 		planes[0] = event_notify->packet_buffer;
 		planes[1] = event_notify->extra_data_buffer;
 		mbuf = msm_comm_get_buffer_using_device_planes(inst, planes);
-		mbuf->output_tag = event_notify->output_tag;
 		if (!mbuf || !kref_get_mbuf(inst, mbuf)) {
 			dprintk(VIDC_ERR,
 				"%s: data_addr %x, extradata_addr %x not found\n",
 				__func__, planes[0], planes[1]);
 		} else {
+			mbuf->output_tag = event_notify->output_tag;
 			handle_release_buffer_reference(inst, mbuf);
 			kref_put_mbuf(mbuf);
 		}
@@ -5522,7 +5522,6 @@
 {
 	u32 x_min, x_max, y_min, y_max;
 	u32 input_height, input_width, output_height, output_width;
-	u32 rotation;
 
 	if (is_heic_encode_session(inst)) {
 		dprintk(VIDC_DBG, "Skip downscale check for HEIC\n");
@@ -5563,20 +5562,6 @@
 		return 0;
 	}
 
-	rotation =  msm_comm_g_ctrl_for_id(inst,
-					V4L2_CID_MPEG_VIDC_VIDEO_ROTATION);
-
-	if ((output_width != output_height) &&
-		(rotation == V4L2_CID_MPEG_VIDC_VIDEO_ROTATION_90 ||
-		rotation == V4L2_CID_MPEG_VIDC_VIDEO_ROTATION_270)) {
-
-		output_width = inst->prop.height[CAPTURE_PORT];
-		output_height = inst->prop.width[CAPTURE_PORT];
-		dprintk(VIDC_DBG,
-			"Rotation=%u Swapped Output W=%u H=%u to check scaling",
-			rotation, output_width, output_height);
-	}
-
 	x_min = (1<<16)/inst->capability.scale_x.min;
 	y_min = (1<<16)/inst->capability.scale_y.min;
 	x_max = inst->capability.scale_x.max >> 16;
@@ -5622,7 +5607,6 @@
 	struct hfi_device *hdev;
 	struct msm_vidc_core *core;
 	u32 output_height, output_width, input_height, input_width;
-	u32 rotation;
 
 	if (!inst || !inst->core || !inst->core->device) {
 		dprintk(VIDC_WARN, "%s: Invalid parameter\n", __func__);
@@ -5661,23 +5645,9 @@
 		rc = -ENOTSUPP;
 	}
 
-	rotation =  msm_comm_g_ctrl_for_id(inst,
-					V4L2_CID_MPEG_VIDC_VIDEO_ROTATION);
-
 	output_height = ALIGN(inst->prop.height[CAPTURE_PORT], 16);
 	output_width = ALIGN(inst->prop.width[CAPTURE_PORT], 16);
 
-	if ((output_width != output_height) &&
-		(rotation == V4L2_CID_MPEG_VIDC_VIDEO_ROTATION_90 ||
-		rotation == V4L2_CID_MPEG_VIDC_VIDEO_ROTATION_270)) {
-
-		output_width = ALIGN(inst->prop.height[CAPTURE_PORT], 16);
-		output_height = ALIGN(inst->prop.width[CAPTURE_PORT], 16);
-		dprintk(VIDC_DBG,
-			"Rotation=%u Swapped Output W=%u H=%u to check capability",
-			rotation, output_width, output_height);
-	}
-
 	if (!rc) {
 		if (output_width < capability->width.min ||
 			output_height < capability->height.min) {
@@ -6474,7 +6444,7 @@
 	mutex_lock(&inst->registeredbufs.lock);
 	found = false;
 	list_for_each_entry(mbuf, &inst->registeredbufs.list, list) {
-		if (msm_comm_compare_device_planes(mbuf, planes)) {
+		if (msm_comm_compare_device_plane(mbuf, planes, 0)) {
 			found = true;
 			break;
 		}
@@ -6549,8 +6519,14 @@
 	mutex_lock(&inst->registeredbufs.lock);
 	if (inst->session_type == MSM_VIDC_DECODER) {
 		list_for_each_entry(mbuf, &inst->registeredbufs.list, list) {
-			if (msm_comm_compare_dma_planes(inst, mbuf,
-					dma_planes)) {
+			/*
+			 * The client might have queued the same plane[0] with a
+			 * different plane[1]. Search for plane[0] and, if found,
+			 * don't queue the buffer; it will be queued when the rbr
+			 * event arrives.
+			 */
+			if (msm_comm_compare_dma_plane(inst, mbuf,
+						dma_planes, 0)) {
 				found = true;
 				break;
 			}
@@ -6607,37 +6583,15 @@
 		}
 	}
 
-	/* special handling for decoder */
+	/*
+	 * Special handling for decoder: if RBR is pending on this buffer then
+	 * set the RBR_PENDING flag and clear the DEFERRED flag to avoid this
+	 * buffer getting queued to video hardware in msm_comm_qbuf(), which
+	 * tries to queue all the DEFERRED buffers.
+	 */
 	if (inst->session_type == MSM_VIDC_DECODER) {
 		if (found) {
 			rc = -EEXIST;
-		} else {
-			bool found_plane0 = false;
-			struct msm_vidc_buffer *temp;
-			/*
-			 * client might have queued same plane[0] but different
-			 * plane[1] search plane[0] and if found don't queue the
-			 * buffer, the buffer will be queued when rbr event
-			 * arrived.
-			 */
-			list_for_each_entry(temp, &inst->registeredbufs.list,
-						list) {
-				if (msm_comm_compare_dma_plane(inst, temp,
-						dma_planes, 0)) {
-					found_plane0 = true;
-					break;
-				}
-			}
-			if (found_plane0)
-				rc = -EEXIST;
-		}
-		/*
-		 * If RBR pending on this buffer then enable RBR_PENDING flag
-		 * and clear the DEFERRED flag to avoid this buffer getting
-		 * queued to video hardware in msm_comm_qbuf() which tries to
-		 * queue all the DEFERRED buffers.
-		 */
-		if (rc == -EEXIST) {
 			mbuf->flags |= MSM_VIDC_FLAG_RBR_PENDING;
 			mbuf->flags &= ~MSM_VIDC_FLAG_DEFERRED;
 		}
@@ -6692,22 +6646,23 @@
 		goto unlock;
 	}
 
-	print_vidc_buffer(VIDC_DBG, "dqbuf", inst, mbuf);
 	for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++) {
 		if (msm_smem_unmap_dma_buf(inst, &mbuf->smem[i]))
 			print_vidc_buffer(VIDC_ERR,
-				"dqbuf: unmap failed.", inst, mbuf);
+					"dqbuf: unmap failed.", inst, mbuf);
 
-		if (!(mbuf->vvb.flags & V4L2_QCOM_BUF_FLAG_READONLY)) {
-			/* rbr won't come for this buffer */
-			if (msm_smem_unmap_dma_buf(inst, &mbuf->smem[i]))
-				print_vidc_buffer(VIDC_ERR,
-					"dqbuf: unmap failed..", inst, mbuf);
-		} else {
-			/* RBR event expected */
+		if (i == 0 && mbuf->vvb.flags & V4L2_QCOM_BUF_FLAG_READONLY) {
+			/* RBR event expected only for plane[0] */
 			mbuf->flags |= MSM_VIDC_FLAG_RBR_PENDING;
+			continue;
+		}
+
+		if (msm_smem_unmap_dma_buf(inst, &mbuf->smem[i])) {
+			print_vidc_buffer(VIDC_ERR,
+					"dqbuf: unmap failed..", inst, mbuf);
 		}
 	}
+	print_vidc_buffer(VIDC_DBG, "dqbuf", inst, mbuf);
 	/*
 	 * remove the entry if plane[0].refcount is zero else
 	 * don't remove as client queued same buffer that's why
@@ -6755,39 +6710,27 @@
 		/* clear RBR_PENDING flag */
 		mbuf->flags &= ~MSM_VIDC_FLAG_RBR_PENDING;
 
-		for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++) {
-			if (msm_smem_unmap_dma_buf(inst, &mbuf->smem[i]))
-				print_vidc_buffer(VIDC_ERR,
+		/*
+		 * Extradata plane is not accounted for in the registered list.
+		 * Hence unref only the image plane to check if it has
+		 * outstanding refs (pending rbr case)
+		 */
+		if (msm_smem_unmap_dma_buf(inst, &mbuf->smem[0]))
+			print_vidc_buffer(VIDC_ERR,
 					"rbr unmap failed.", inst, mbuf);
-		}
+
 		/* refcount is not zero if client queued the same buffer */
 		if (!mbuf->smem[0].refcount) {
 			list_del(&mbuf->list);
 			kref_put_mbuf(mbuf);
 			mbuf = NULL;
+			found = false;
 		}
 	} else {
 		print_vidc_buffer(VIDC_ERR, "mbuf not found", inst, mbuf);
 		goto unlock;
 	}
 
-	/*
-	 * 1. client might have pushed same planes in which case mbuf will be
-	 *    same and refcounts are positive and buffer wouldn't have been
-	 *    removed from the registeredbufs list.
-	 * 2. client might have pushed same planes[0] but different planes[1]
-	 *    in which case mbuf will be different.
-	 * 3. in either case we can search mbuf->smem[0].device_addr in the list
-	 *    and if found queue it to video hw (if not flushing).
-	 */
-	found = false;
-	list_for_each_entry(temp, &inst->registeredbufs.list, list) {
-		if (msm_comm_compare_device_plane(temp, planes, 0)) {
-			mbuf = temp;
-			found = true;
-			break;
-		}
-	}
 	if (!found)
 		goto unlock;
 
diff --git a/drivers/media/platform/msm/vidc_3x/msm_vdec.c b/drivers/media/platform/msm/vidc_3x/msm_vdec.c
index bb4b6c8..2ff610a 100644
--- a/drivers/media/platform/msm/vidc_3x/msm_vdec.c
+++ b/drivers/media/platform/msm/vidc_3x/msm_vdec.c
@@ -1943,6 +1943,7 @@
 	inst->buffer_mode_set[OUTPUT_PORT] = HAL_BUFFER_MODE_STATIC;
 	inst->buffer_mode_set[CAPTURE_PORT] = HAL_BUFFER_MODE_STATIC;
 	inst->prop.fps = DEFAULT_FPS;
+	inst->prop.operating_rate = 0;
 	memcpy(&inst->fmts[OUTPUT_PORT], &vdec_formats[2],
 						sizeof(struct msm_vidc_format));
 	memcpy(&inst->fmts[CAPTURE_PORT], &vdec_formats[0],
@@ -2551,8 +2552,33 @@
 		 */
 		hal_property.enable = !(ctrl->val);
 		pdata = &hal_property;
+		switch (ctrl->val) {
+		case V4L2_MPEG_VIDC_VIDEO_PRIORITY_REALTIME_DISABLE:
+			inst->flags &= ~VIDC_REALTIME;
+			break;
+		case V4L2_MPEG_VIDC_VIDEO_PRIORITY_REALTIME_ENABLE:
+			inst->flags |= VIDC_REALTIME;
+			break;
+		default:
+			dprintk(VIDC_WARN,
+				"inst(%pK) invalid priority ctrl value %#x\n",
+				inst, ctrl->val);
+			break;
+		}
 		break;
 	case V4L2_CID_MPEG_VIDC_VIDEO_OPERATING_RATE:
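+		/*
+		 * The operating rate is in Q16.16 format; validate the integer
+		 * fps part against the supported frame rate range.
+		 */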
+		if ((ctrl->val >> 16) < inst->capability.frame_rate.min ||
+			 (ctrl->val >> 16) > inst->capability.frame_rate.max) {
+			dprintk(VIDC_ERR, "Invalid operating rate %u\n",
+				(ctrl->val >> 16));
+			rc = -ENOTSUPP;
+		} else {
+			dprintk(VIDC_DBG,
+				"inst(%pK) operating rate changed from %d to %d\n",
+				inst, inst->prop.operating_rate >> 16,
+					ctrl->val >> 16);
+			inst->prop.operating_rate = ctrl->val;
+		}
 		break;
 	default:
 		break;
diff --git a/drivers/media/platform/msm/vidc_3x/msm_venc.c b/drivers/media/platform/msm/vidc_3x/msm_venc.c
index ef6e360..5e98a5c 100644
--- a/drivers/media/platform/msm/vidc_3x/msm_venc.c
+++ b/drivers/media/platform/msm/vidc_3x/msm_venc.c
@@ -3625,8 +3625,33 @@
 		 */
 		enable.enable = !(ctrl->val);
 		pdata = &enable;
+		switch (ctrl->val) {
+		case V4L2_MPEG_VIDC_VIDEO_PRIORITY_REALTIME_DISABLE:
+			inst->flags &= ~VIDC_REALTIME;
+			break;
+		case V4L2_MPEG_VIDC_VIDEO_PRIORITY_REALTIME_ENABLE:
+			inst->flags |= VIDC_REALTIME;
+			break;
+		default:
+			dprintk(VIDC_WARN,
+				"inst(%pK) invalid priority ctrl value %#x\n",
+				inst, ctrl->val);
+			break;
+		}
 		break;
 	case V4L2_CID_MPEG_VIDC_VIDEO_OPERATING_RATE:
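+		/*
+		 * The operating rate is in Q16.16 format; validate the integer
+		 * fps part against the supported frame rate range.
+		 */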
+		if ((ctrl->val >> 16) < inst->capability.frame_rate.min ||
+			 (ctrl->val >> 16) > inst->capability.frame_rate.max) {
+			dprintk(VIDC_ERR, "Invalid operating rate %u\n",
+				(ctrl->val >> 16));
+			rc = -ENOTSUPP;
+		} else {
+			dprintk(VIDC_DBG,
+				"inst(%pK) operating rate changed from %d to %d\n",
+				inst, inst->prop.operating_rate >> 16,
+					ctrl->val >> 16);
+			inst->prop.operating_rate = ctrl->val;
+		}
 		break;
 	case V4L2_CID_MPEG_VIDC_VIDEO_VENC_BITRATE_TYPE:
 	{
@@ -4067,6 +4092,7 @@
 	inst->buffer_mode_set[OUTPUT_PORT] = HAL_BUFFER_MODE_STATIC;
 	inst->buffer_mode_set[CAPTURE_PORT] = HAL_BUFFER_MODE_STATIC;
 	inst->prop.fps = DEFAULT_FPS;
+	inst->prop.operating_rate = 0;
 	inst->capability.pixelprocess_capabilities = 0;
 	memcpy(&inst->fmts[CAPTURE_PORT], &venc_formats[4],
 						sizeof(struct msm_vidc_format));
diff --git a/drivers/media/platform/msm/vidc_3x/msm_vidc_common.c b/drivers/media/platform/msm/vidc_3x/msm_vidc_common.c
index 998b397..a80ae03 100644
--- a/drivers/media/platform/msm/vidc_3x/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc_3x/msm_vidc_common.c
@@ -258,14 +258,9 @@
 	return 0;
 }
 
-static inline bool is_non_realtime_session(struct msm_vidc_inst *inst)
+static inline bool is_realtime_session(struct msm_vidc_inst *inst)
 {
-	int rc = 0;
-	struct v4l2_control ctrl = {
-		.id = V4L2_CID_MPEG_VIDC_VIDEO_PRIORITY
-	};
-	rc = msm_comm_g_ctrl(inst, &ctrl);
-	return (!rc && ctrl.value);
+	return !!(inst->flags & VIDC_REALTIME);
 }
 
 enum multi_stream msm_comm_get_stream_output_mode(struct msm_vidc_inst *inst)
@@ -297,17 +292,15 @@
 
 static int msm_comm_get_mbs_per_sec(struct msm_vidc_inst *inst)
 {
-	int rc;
 	u32 fps;
-	struct v4l2_control ctrl;
 	int mb_per_frame;
+	u32 oper_rate;
 
 	mb_per_frame = msm_comm_get_mbs_per_frame(inst);
+	oper_rate = inst->prop.operating_rate;
 
-	ctrl.id = V4L2_CID_MPEG_VIDC_VIDEO_OPERATING_RATE;
-	rc = msm_comm_g_ctrl(inst, &ctrl);
-	if (!rc && ctrl.value) {
-		fps = (ctrl.value >> 16) ? ctrl.value >> 16 : 1;
+	if (oper_rate) {
+		fps = (oper_rate >> 16) ? oper_rate >> 16 : 1;
 		/*
 		 * Check if operating rate is less than fps.
 		 * If Yes, then use fps to scale the clocks
@@ -354,7 +347,7 @@
 	 * ----------------|----------------------|------------------------|
 	 */
 
-	if (is_non_realtime_session(inst) &&
+	if (is_realtime_session(inst) &&
 		(quirks & LOAD_CALC_IGNORE_NON_REALTIME_LOAD)) {
 		if (!inst->prop.fps) {
 			dprintk(VIDC_INFO, "instance:%pK fps = 0\n", inst);
@@ -535,7 +528,7 @@
 
 	list_for_each_entry(inst, &core->instances, list) {
 		int codec = 0, yuv = 0;
-		struct v4l2_control ctrl;
+		u32 oper_rate;
 
 		codec = inst->session_type == MSM_VIDC_DECODER ?
 			inst->fmts[OUTPUT_PORT].fourcc :
@@ -552,11 +545,11 @@
 		vote_data[i].height = max(inst->prop.height[CAPTURE_PORT],
 			inst->prop.height[OUTPUT_PORT]);
 
-		ctrl.id = V4L2_CID_MPEG_VIDC_VIDEO_OPERATING_RATE;
-		rc = msm_comm_g_ctrl(inst, &ctrl);
-		if (!rc && ctrl.value)
-			vote_data[i].fps = (ctrl.value >> 16) ?
-				ctrl.value >> 16 : 1;
+		oper_rate = inst->prop.operating_rate;
+
+		if (oper_rate)
+			vote_data[i].fps = (oper_rate >> 16) ?
+				oper_rate >> 16 : 1;
 		else
 			vote_data[i].fps = inst->prop.fps;
 
diff --git a/drivers/media/platform/msm/vidc_3x/msm_vidc_internal.h b/drivers/media/platform/msm/vidc_3x/msm_vidc_internal.h
index 00cbafb..56b86d7 100644
--- a/drivers/media/platform/msm/vidc_3x/msm_vidc_internal.h
+++ b/drivers/media/platform/msm/vidc_3x/msm_vidc_internal.h
@@ -173,6 +173,7 @@
 	u32 height[MAX_PORT_NUM];
 	u32 fps;
 	u32 bitrate;
+	u32 operating_rate;
 };
 
 struct buf_queue {
@@ -239,6 +240,7 @@
 	VIDC_TURBO = BIT(1),
 	VIDC_THUMBNAIL = BIT(2),
 	VIDC_LOW_POWER = BIT(3),
+	VIDC_REALTIME = BIT(4),
 };
 
 struct msm_vidc_core {
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 15a86bb..1e98b48 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -304,7 +304,7 @@
 static int isp_xclk_init(struct isp_device *isp)
 {
 	struct device_node *np = isp->dev->of_node;
-	struct clk_init_data init;
+	struct clk_init_data init = { 0 };
 	unsigned int i;
 
 	for (i = 0; i < ARRAY_SIZE(isp->xclks); ++i)
diff --git a/drivers/media/platform/s3c-camif/camif-capture.c b/drivers/media/platform/s3c-camif/camif-capture.c
index 5c9db09..d9710b5 100644
--- a/drivers/media/platform/s3c-camif/camif-capture.c
+++ b/drivers/media/platform/s3c-camif/camif-capture.c
@@ -117,6 +117,8 @@
 
 	if (camif->sensor.power_count == !on)
 		err = v4l2_subdev_call(sensor->sd, core, s_power, on);
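+	/* A sensor without a .s_power op is not an error; treat it as success. */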
+	if (err == -ENOIOCTLCMD)
+		err = 0;
 	if (!err)
 		sensor->power_count += on ? 1 : -1;
 
diff --git a/drivers/media/usb/tm6000/tm6000-dvb.c b/drivers/media/usb/tm6000/tm6000-dvb.c
index 0426b21..ee88ae8 100644
--- a/drivers/media/usb/tm6000/tm6000-dvb.c
+++ b/drivers/media/usb/tm6000/tm6000-dvb.c
@@ -273,6 +273,11 @@
 
 	ret = dvb_register_adapter(&dvb->adapter, "Trident TVMaster 6000 DVB-T",
 					THIS_MODULE, &dev->udev->dev, adapter_nr);
+	if (ret < 0) {
+		pr_err("tm6000: couldn't register the adapter!\n");
+		goto err;
+	}
+
 	dvb->adapter.priv = dev;
 
 	if (dvb->frontend) {
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index b5589d5..48503f30 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -163,14 +163,27 @@
 	}
 }
 
+static size_t uvc_video_ctrl_size(struct uvc_streaming *stream)
+{
+	/*
+	 * Return the size of the video probe and commit controls, which depends
+	 * on the protocol version.
+	 */
+	if (stream->dev->uvc_version < 0x0110)
+		return 26;
+	else if (stream->dev->uvc_version < 0x0150)
+		return 34;
+	else
+		return 48;
+}
+
 static int uvc_get_video_ctrl(struct uvc_streaming *stream,
 	struct uvc_streaming_control *ctrl, int probe, __u8 query)
 {
+	__u16 size = uvc_video_ctrl_size(stream);
 	__u8 *data;
-	__u16 size;
 	int ret;
 
-	size = stream->dev->uvc_version >= 0x0110 ? 34 : 26;
 	if ((stream->dev->quirks & UVC_QUIRK_PROBE_DEF) &&
 			query == UVC_GET_DEF)
 		return -EIO;
@@ -225,7 +238,7 @@
 	ctrl->dwMaxVideoFrameSize = get_unaligned_le32(&data[18]);
 	ctrl->dwMaxPayloadTransferSize = get_unaligned_le32(&data[22]);
 
-	if (size == 34) {
+	if (size >= 34) {
 		ctrl->dwClockFrequency = get_unaligned_le32(&data[26]);
 		ctrl->bmFramingInfo = data[30];
 		ctrl->bPreferedVersion = data[31];
@@ -254,11 +267,10 @@
 static int uvc_set_video_ctrl(struct uvc_streaming *stream,
 	struct uvc_streaming_control *ctrl, int probe)
 {
+	__u16 size = uvc_video_ctrl_size(stream);
 	__u8 *data;
-	__u16 size;
 	int ret;
 
-	size = stream->dev->uvc_version >= 0x0110 ? 34 : 26;
 	data = kzalloc(size, GFP_KERNEL);
 	if (data == NULL)
 		return -ENOMEM;
@@ -275,7 +287,7 @@
 	put_unaligned_le32(ctrl->dwMaxVideoFrameSize, &data[18]);
 	put_unaligned_le32(ctrl->dwMaxPayloadTransferSize, &data[22]);
 
-	if (size == 34) {
+	if (size >= 34) {
 		put_unaligned_le32(ctrl->dwClockFrequency, &data[26]);
 		data[30] = ctrl->bmFramingInfo;
 		data[31] = ctrl->bPreferedVersion;
diff --git a/drivers/media/v4l2-core/v4l2-event.c b/drivers/media/v4l2-core/v4l2-event.c
index 8d3171c..567d868 100644
--- a/drivers/media/v4l2-core/v4l2-event.c
+++ b/drivers/media/v4l2-core/v4l2-event.c
@@ -119,14 +119,6 @@
 	if (sev == NULL)
 		return;
 
-	/*
-	 * If the event has been added to the fh->subscribed list, but its
-	 * add op has not completed yet elems will be 0, treat this as
-	 * not being subscribed.
-	 */
-	if (!sev->elems)
-		return;
-
 	/* Increase event sequence number on fh. */
 	fh->sequence++;
 
@@ -212,6 +204,7 @@
 	struct v4l2_subscribed_event *sev, *found_ev;
 	unsigned long flags;
 	unsigned i;
+	int ret = 0;
 
 	if (sub->type == V4L2_EVENT_ALL)
 		return -EINVAL;
@@ -229,31 +222,36 @@
 	sev->flags = sub->flags;
 	sev->fh = fh;
 	sev->ops = ops;
+	sev->elems = elems;
+
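+	/*
+	 * Serialise with unsubscribe: the subscription is added to the list
+	 * only after its add op has completed, and the del op runs under the
+	 * same lock.
+	 */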
+	mutex_lock(&fh->subscribe_lock);
 
 	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
 	found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
-	if (!found_ev)
-		list_add(&sev->list, &fh->subscribed);
 	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
 
 	if (found_ev) {
+		/* Already listening */
 		kfree(sev);
-		return 0; /* Already listening */
+		goto out_unlock;
 	}
 
 	if (sev->ops && sev->ops->add) {
-		int ret = sev->ops->add(sev, elems);
+		ret = sev->ops->add(sev, elems);
 		if (ret) {
-			sev->ops = NULL;
-			v4l2_event_unsubscribe(fh, sub);
-			return ret;
+			kfree(sev);
+			goto out_unlock;
 		}
 	}
 
-	/* Mark as ready for use */
-	sev->elems = elems;
+	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+	list_add(&sev->list, &fh->subscribed);
+	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
 
-	return 0;
+out_unlock:
+	mutex_unlock(&fh->subscribe_lock);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
 
@@ -292,6 +290,8 @@
 		return 0;
 	}
 
+	mutex_lock(&fh->subscribe_lock);
+
 	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
 
 	sev = v4l2_event_subscribed(fh, sub->type, sub->id);
@@ -309,6 +309,8 @@
 	if (sev && sev->ops && sev->ops->del)
 		sev->ops->del(sev);
 
+	mutex_unlock(&fh->subscribe_lock);
+
 	kfree(sev);
 
 	return 0;
diff --git a/drivers/media/v4l2-core/v4l2-fh.c b/drivers/media/v4l2-core/v4l2-fh.c
index c183f09..0c5e690 100644
--- a/drivers/media/v4l2-core/v4l2-fh.c
+++ b/drivers/media/v4l2-core/v4l2-fh.c
@@ -50,6 +50,7 @@
 	INIT_LIST_HEAD(&fh->available);
 	INIT_LIST_HEAD(&fh->subscribed);
 	fh->sequence = -1;
+	mutex_init(&fh->subscribe_lock);
 }
 EXPORT_SYMBOL_GPL(v4l2_fh_init);
 
@@ -95,6 +96,7 @@
 		return;
 	v4l_disable_media_source(fh->vdev);
 	v4l2_event_unsubscribe_all(fh);
+	mutex_destroy(&fh->subscribe_lock);
 	fh->vdev = NULL;
 }
 EXPORT_SYMBOL_GPL(v4l2_fh_exit);
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 7aab376..3785c63 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -548,8 +548,8 @@
 }
 
 static const struct of_device_id usbhs_child_match_table[] = {
-	{ .compatible = "ti,omap-ehci", },
-	{ .compatible = "ti,omap-ohci", },
+	{ .compatible = "ti,ehci-omap", },
+	{ .compatible = "ti,ohci-omap3", },
 	{ }
 };
 
@@ -875,6 +875,7 @@
 		.pm		= &usbhsomap_dev_pm_ops,
 		.of_match_table = usbhs_omap_dt_ids,
 	},
+	.probe		= usbhs_omap_probe,
 	.remove		= usbhs_omap_remove,
 };
 
@@ -884,9 +885,9 @@
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("usb host common core driver for omap EHCI and OHCI");
 
-static int __init omap_usbhs_drvinit(void)
+static int omap_usbhs_drvinit(void)
 {
-	return platform_driver_probe(&usbhs_omap_driver, usbhs_omap_probe);
+	return platform_driver_register(&usbhs_omap_driver);
 }
 
 /*
@@ -898,7 +899,7 @@
  */
 fs_initcall_sync(omap_usbhs_drvinit);
 
-static void __exit omap_usbhs_drvexit(void)
+static void omap_usbhs_drvexit(void)
 {
 	platform_driver_unregister(&usbhs_omap_driver);
 }
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index a233173..cf5764e 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -827,3 +827,13 @@
 source "drivers/misc/cxl/Kconfig"
 source "drivers/misc/fpr_FingerprintCard/Kconfig"
 endmenu
+
+config OKL4_LINK_SHBUF
+	tristate "OKL4 link with shared buffer transport"
+	default y
+	depends on OKL4_GUEST
+	help
+	  Enable the driver for OKL4 inter-cell links using the
+	  "shared-buffer" transport. This driver presents the link to Linux
+	  as a character device which can be read from or written to in
+	  order to access the shared memory. An ioctl on the device is used
+	  to send a virtual interrupt to the partner cell.
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 8e5d0f6..6494a66 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -80,3 +80,5 @@
 targets += lkdtm_rodata.o lkdtm_rodata_objcopy.o
 $(obj)/lkdtm_rodata_objcopy.o: $(obj)/lkdtm_rodata.o FORCE
 	$(call if_changed,objcopy)
+
+obj-$(CONFIG_OKL4_LINK_SHBUF)    += okl4-link-shbuf.o
diff --git a/drivers/misc/okl4-link-shbuf.c b/drivers/misc/okl4-link-shbuf.c
new file mode 100644
index 0000000..de65ea0
--- /dev/null
+++ b/drivers/misc/okl4-link-shbuf.c
@@ -0,0 +1,667 @@
+/*
+ * Driver for inter-cell links using the shared-buffer transport.
+ *
+ * Copyright (c) 2016 Cog Systems Pty Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#include <linux/atomic.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioctl.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/rwsem.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/version.h>
+#include <microvisor/microvisor.h>
+#include <uapi/linux/okl4-link-shbuf.h>
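+
+/*
+ * Userspace usage sketch (illustrative only; assumes a link labelled "foo"
+ * in the cell configuration, which appears as /dev/okl4-foo):
+ *
+ *	int fd = open("/dev/okl4-foo", O_RDWR);
+ *	uint64_t payload = 1;
+ *
+ *	write(fd, buf, len);                                // fill shared buffer
+ *	ioctl(fd, OKL4_LINK_SHBUF_IOCTL_IRQ_TX, &payload);  // raise partner vIRQ
+ *
+ *	poll(&(struct pollfd){ .fd = fd, .events = POLLPRI }, 1, -1);
+ *	ioctl(fd, OKL4_LINK_SHBUF_IOCTL_IRQ_CLR, &payload); // ack, read payload
+ */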
+
+static const char DEVICE_NAME[] = "okl4_link_shbuf";
+
+/* Created devices will appear as /dev/<DEV_PREFIX><name> */
+static const char DEV_PREFIX[] = "okl4-";
+
+static const struct of_device_id okl4_link_shbuf_match[] = {
+	{
+		.compatible = "okl,microvisor-link-shbuf",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, okl4_link_shbuf_match);
+
+static struct class *link_shbuf_class;
+static dev_t link_shbuf_dev;
+
+/* A lock used to protect access to link_shbuf_dev */
+static spinlock_t device_number_allocate;
+
+/* Sentinel values for indicating missing communication channels */
+static const u32 NO_OUTGOING_IRQ = 0;
+static const int NO_INCOMING_IRQ = -1;
+
+/* Private data for this driver */
+struct link_shbuf_data {
+
+	/* Outgoing vIRQ */
+	u32 virqline;
+
+	/* Incoming vIRQ */
+	int virq;
+	atomic64_t virq_payload;
+	bool virq_pending;
+	wait_queue_head_t virq_wq;
+
+	/* Shared memory region */
+	void *base;
+	fmode_t permissions;
+	struct resource buffer;
+
+	/* Device data */
+	dev_t devt;
+	struct device *dev;
+	struct cdev cdev;
+
+};
+
+static bool link_shbuf_data_invariant(const struct link_shbuf_data *priv)
+{
+	if (!priv)
+		return false;
+
+	if (!priv->base || (uintptr_t)priv->base % PAGE_SIZE != 0)
+		return false;
+
+	if (resource_size(&priv->buffer) == 0)
+		return false;
+
+	if (!priv->dev)
+		return false;
+
+	return true;
+}
+
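+/*
+ * Bounds check that cannot overflow: both pos and pos + count must lie
+ * within a buffer of the given size.
+ */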
+static bool link_shbuf_valid_access(size_t size, loff_t pos, size_t count)
+{
+	return pos < size && count <= size && size - count >= pos;
+}
+
+static ssize_t link_shbuf_read(struct file *file, char __user *buffer,
+		size_t count, loff_t *ppos)
+{
+	long remaining;
+	const struct link_shbuf_data *priv;
+
+	/* The file should have been opened with read access to reach here */
+	if (WARN_ON(!(file->f_mode & FMODE_READ)))
+		return -EINVAL;
+
+	priv = file->private_data;
+	if (WARN_ON(!link_shbuf_data_invariant(priv)))
+		return -EINVAL;
+
+	if (!link_shbuf_valid_access(resource_size(&priv->buffer), *ppos, count))
+		return -EINVAL;
+
+	remaining = copy_to_user(buffer, priv->base + *ppos, count);
+	*ppos += count - remaining;
+	return count - remaining;
+}
+
+static ssize_t link_shbuf_write(struct file *file, const char __user *buffer,
+		size_t count, loff_t *ppos)
+{
+	long remaining;
+	const struct link_shbuf_data *priv;
+
+	/* The file should have been opened with write access to reach here */
+	if (WARN_ON(!(file->f_mode & FMODE_WRITE)))
+		return -EINVAL;
+
+	priv = file->private_data;
+	if (WARN_ON(!link_shbuf_data_invariant(priv)))
+		return -EINVAL;
+
+	if (!link_shbuf_valid_access(resource_size(&priv->buffer), *ppos, count))
+		return -EINVAL;
+
+	remaining = copy_from_user(priv->base + *ppos, buffer, count);
+	*ppos += count - remaining;
+	return count - remaining;
+}
+
+static unsigned int link_shbuf_poll(struct file *file, poll_table *table)
+{
+	struct link_shbuf_data *priv;
+	unsigned int mask;
+
+	priv = file->private_data;
+	if (WARN_ON(!link_shbuf_data_invariant(priv)))
+		return POLLERR;
+
+	poll_wait(file, &priv->virq_wq, table);
+
+	/* The shared memory is always considered ready for reading and writing. */
+	mask = POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM;
+
+	if (priv->virq_pending)
+		mask |= POLLPRI;
+
+	return mask;
+}
+
+static long link_shbuf_ioctl_irq_tx(const struct link_shbuf_data *priv,
+		unsigned long arg)
+{
+	okl4_error_t err;
+	u64 payload;
+	const u64 __user *user_arg = (const u64 __user*)arg;
+
+	if (priv->virqline == NO_OUTGOING_IRQ)
+		return -EINVAL;
+
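+	/*
+	 * 32-bit ARM kernels older than v3.17 lack 64-bit get_user(); fall
+	 * back to copy_from_user() there.
+	 */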
+#if defined(CONFIG_ARM) && (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0))
+	if (copy_from_user(&payload, user_arg, sizeof(payload)))
+		return -EFAULT;
+#else
+	if (get_user(payload, user_arg))
+		return -EFAULT;
+#endif
+
+	err = _okl4_sys_vinterrupt_raise(priv->virqline, payload);
+	if (WARN_ON(err != OKL4_OK))
+		return -EINVAL;
+
+	return 0;
+}
+
+static long link_shbuf_ioctl_irq_clr(struct link_shbuf_data *priv,
+		unsigned long arg)
+{
+	u64 payload;
+	u64 __user *user_arg = (u64 __user*)arg;
+
+	/*
+	 * Validate the user pointer before clearing the interrupt, so that a
+	 * failed copy-out never forces us to undo the clear.
+	 */
+	if (!access_ok(VERIFY_WRITE, user_arg, sizeof(*user_arg)))
+		return -EFAULT;
+
+	/*
+	 * Note that the clearing of the pending flag can race with the setting of
+	 * this flag in the IRQ handler. It is up to the user to coordinate these
+	 * actions.
+	 */
+	priv->virq_pending = false;
+	smp_rmb();
+	payload = atomic64_xchg(&priv->virq_payload, 0);
+
+	/* We've already checked that this access is OK, so no need for put_user. */
+	if (__put_user(payload, user_arg))
+		return -EFAULT;
+
+	return 0;
+}
+
+static long link_shbuf_ioctl(struct file *file, unsigned int request,
+		unsigned long arg)
+{
+	struct link_shbuf_data *priv;
+
+	priv = file->private_data;
+	if (WARN_ON(!link_shbuf_data_invariant(priv)))
+		return -EINVAL;
+
+	/* We only support two ioctls */
+	switch (request) {
+
+	case OKL4_LINK_SHBUF_IOCTL_IRQ_TX:
+		return link_shbuf_ioctl_irq_tx(priv, arg);
+
+	case OKL4_LINK_SHBUF_IOCTL_IRQ_CLR:
+		return link_shbuf_ioctl_irq_clr(priv, arg);
+
+	}
+
+	/*
+	 * Handy for debugging when userspace is linking against ioctl headers from
+	 * a different kernel revision.
+	 */
+	dev_dbg(priv->dev, "ioctl request 0x%x received which did not match either "
+		"OKL4_LINK_SHBUF_IOCTL_IRQ_TX (0x%x) or OKL4_LINK_SHBUF_IOCTL_IRQ_CLR "
+		"(0x%x)\n", request, (unsigned)OKL4_LINK_SHBUF_IOCTL_IRQ_TX,
+		(unsigned)OKL4_LINK_SHBUF_IOCTL_IRQ_CLR);
+
+	return -EINVAL;
+}
+
+static int link_shbuf_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	const struct link_shbuf_data *priv;
+	unsigned long offset, pfn, flags;
+	size_t size;
+	pgprot_t prot;
+
+	/* Our caller should have taken the MM semaphore. */
+	if (WARN_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem)))
+		return -EINVAL;
+
+	/*
+	 * The file should have been opened with a superset of the mmap requested
+	 * permissions.
+	 */
+	flags = vma->vm_flags;
+	if (WARN_ON((flags & VM_READ) && !(file->f_mode & FMODE_READ)))
+		return -EINVAL;
+	if (WARN_ON((flags & VM_WRITE) && !(file->f_mode & FMODE_WRITE)))
+		return -EINVAL;
+	if (WARN_ON((flags & VM_EXEC) && !(file->f_mode & FMODE_EXEC)))
+		return -EINVAL;
+
+	/* Retrieve our private data. */
+	priv = file->private_data;
+	if (WARN_ON(!link_shbuf_data_invariant(priv)))
+		return -EINVAL;
+
+	/* Check the mmap request is within bounds. */
+	size = vma->vm_end - vma->vm_start;
+	offset = vma->vm_pgoff << PAGE_SHIFT;
+	if (!link_shbuf_valid_access(resource_size(&priv->buffer), offset, size))
+		return -EINVAL;
+
+	pfn = (priv->buffer.start + offset) >> PAGE_SHIFT;
+	prot = vm_get_page_prot(flags);
+
+	return remap_pfn_range(vma, vma->vm_start, pfn, size, prot);
+}
+
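+/*
+ * Check that the requested open mode is a subset of the access rights
+ * granted to this cell (the "okl,rwx" device tree property).
+ */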
+static bool link_shbuf_access_ok(fmode_t allowed, fmode_t request)
+{
+	static const fmode_t ACCESS_MASK = FMODE_READ|FMODE_WRITE|FMODE_EXEC;
+	fmode_t relevant = request & ACCESS_MASK;
+	return (relevant & allowed) == relevant;
+}
+
+static int link_shbuf_open(struct inode *inode, struct file *file)
+{
+	struct cdev *cdev;
+	struct link_shbuf_data *priv;
+
+	/* Retrieve a pointer to our private data */
+	cdev = inode->i_cdev;
+	priv = container_of(cdev, struct link_shbuf_data, cdev);
+	if (WARN_ON(!link_shbuf_data_invariant(priv)))
+		return -EINVAL;
+
+	if (!link_shbuf_access_ok(priv->permissions, file->f_mode))
+		return -EACCES;
+
+	file->private_data = priv;
+
+	return 0;
+}
+
+static const struct file_operations link_shbuf_ops = {
+	.owner = THIS_MODULE,
+	.read = link_shbuf_read,
+	.write = link_shbuf_write,
+	.poll = link_shbuf_poll,
+	.unlocked_ioctl = link_shbuf_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = link_shbuf_ioctl,
+#endif
+#ifdef CONFIG_MMU
+	.mmap = link_shbuf_mmap,
+#endif
+	.open = link_shbuf_open,
+};
+
+/*
+ * Interrupt handler.
+ *
+ * This function will be called when our link partner uses the ioctl on their
+ * shared memory device to send an outgoing interrupt.
+ */
+static irqreturn_t link_shbuf_irq_handler(int irq, void *data)
+{
+	u64 payload, old, new;
+	struct _okl4_sys_interrupt_get_payload_return _payload;
+
+	/* Retrieve a pointer to our private data. */
+	struct link_shbuf_data *priv = data;
+	if (WARN_ON(!link_shbuf_data_invariant(priv)))
+		return IRQ_NONE;
+
+	/*
+	 * We should only ever be handling a single interrupt, and only if there
+	 * was an incoming interrupt in the configuration.
+	 */
+	if (WARN_ON(priv->virq < 0 || priv->virq != irq))
+		return IRQ_NONE;
+
+	_payload = _okl4_sys_interrupt_get_payload(irq);
+	payload = (u64)_payload.payload;
+
+	/*
+	 * At this point, it is possible the pending flag is already set. It is up to
+	 * the user to synchronise their transmission and acknowledgement of
+	 * interrupts.
+	 */
+
+	/* We open code atomic64_or which is not universally available. */
+	do {
+		old = atomic64_read(&priv->virq_payload);
+		new = old | payload;
+	} while (atomic64_cmpxchg(&priv->virq_payload, old, new) != old);
+	smp_wmb();
+	priv->virq_pending = true;
+
+	wake_up_interruptible(&priv->virq_wq);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Allocate a unique device number for this device.
+ *
+ * Note that this function needs to lock its access to link_shbuf_dev as there
+ * may be multiple threads attempting to acquire a new device number.
+ */
+static int link_shbuf_allocate_device(dev_t *devt)
+{
+	int ret = 0;
+	dev_t next;
+
+	spin_lock(&device_number_allocate);
+
+	*devt = link_shbuf_dev;
+	next = MKDEV(MAJOR(link_shbuf_dev), MINOR(link_shbuf_dev) + 1);
+	/* Check for overflow */
+	if (MINOR(next) != MINOR(link_shbuf_dev) + 1)
+		ret = -ENOSPC;
+	else
+		link_shbuf_dev = next;
+
+	spin_unlock(&device_number_allocate);
+
+	return ret;
+}
+
+/*
+ * Discover and add a new shared-buffer link.
+ *
+ * In the following function, we are expecting to parse device tree entries
+ * looking like the following:
+ *
+ *	hypervisor {
+ *		...
+ *		interrupt-line@1d {
+ *			compatible = "okl,microvisor-interrupt-line",
+ *				"okl,microvisor-capability";
+ *			phandle = <0x7>;
+ *			reg = <0x1d>;
+ *			label = "foo_virqline";
+ *		};
+ *	};
+ *
+ *	foo@41003000 {
+ *		compatible = "okl,microvisor-link-shbuf",
+ *			"okl,microvisor-shared-memory";
+ *		phandle = <0xd>;
+ *		reg = <0x0 0x41003000 0x2000>;
+ *		label = "foo";
+ *		okl,rwx = <0x6>;
+ *		okl,interrupt-line = <0x7>;
+ *		interrupts = <0x0 0x4 0x1>;
+ *		interrupt-parent = <0x1>;
+ *	};
+ */
+static int link_shbuf_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct device_node *node, *virqline;
+	struct link_shbuf_data *priv;
+	const char *name;
+	u32 permissions;
+
+	node = pdev->dev.of_node;
+
+	if (!node)
+		return -ENODEV;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	/*
+	 * Retrieve the outgoing vIRQ cap. Note, this is configurable and we
+	 * anticipate that it may not exist.
+	 */
+	virqline = of_parse_phandle(node, "okl,interrupt-line", 0);
+	if (!virqline) {
+		priv->virqline = NO_OUTGOING_IRQ;
+	} else {
+		ret = of_property_read_u32(virqline, "reg", &priv->virqline);
+		if (ret < 0 || priv->virqline == OKL4_KCAP_INVALID) {
+			of_node_put(virqline);
+			ret = -ENODEV;
+			goto err_free_dev;
+		}
+	}
+	of_node_put(virqline);
+
+	/*
+	 * Retrieve the incoming vIRQ number. Again, this is configurable and
+	 * we anticipate that it may not exist.
+	 */
+	priv->virq = platform_get_irq(pdev, 0);
+	if (priv->virq < 0)
+		priv->virq = NO_INCOMING_IRQ;
+
+	/* If we have a valid incoming vIRQ, register to handle it. */
+	if (priv->virq >= 0) {
+		ret = devm_request_irq(&pdev->dev, priv->virq, link_shbuf_irq_handler,
+			0, dev_name(&pdev->dev), priv);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "failed request for IRQ\n");
+			goto err_free_dev;
+		}
+	}
+
+	init_waitqueue_head(&priv->virq_wq);
+	priv->virq_pending = false;
+
+	/* Retrieve information about the shared memory region. */
+	ret = of_address_to_resource(node, 0, &priv->buffer);
+	if (ret < 0)
+		goto err_free_irq;
+	/*
+	 * We expect the Elfweaver to have validated that we have a non-NULL,
+	 * page-aligned region.
+	 */
+	if (WARN_ON(priv->buffer.start == 0) ||
+			WARN_ON(resource_size(&priv->buffer) % PAGE_SIZE != 0)) {
+		ret = -EINVAL;
+		goto err_free_irq;
+	}
+	if (!devm_request_mem_region(&pdev->dev, priv->buffer.start,
+			resource_size(&priv->buffer), dev_name(&pdev->dev))) {
+		ret = -ENODEV;
+		goto err_free_irq;
+	}
+	priv->base = devm_ioremap(&pdev->dev, priv->buffer.start,
+			resource_size(&priv->buffer));
+	if (!priv->base) {
+		ret = -ENOMEM;
+		goto err_release_region;
+	}
+
+	/* Read the permissions of the shared memory region. */
+	ret = of_property_read_u32(node, "okl,rwx", &permissions);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to read shared memory permissions\n");
+		goto err_unmap_dev;
+	}
+	if (permissions & ~S_IRWXO) {
+		ret = -EINVAL;
+		goto err_unmap_dev;
+	}
+	priv->permissions = ((permissions & S_IROTH) ? FMODE_READ : 0) |
+			((permissions & S_IWOTH) ? FMODE_WRITE : 0) |
+			((permissions & S_IXOTH) ? FMODE_EXEC : 0);
+	if (WARN_ON(priv->permissions == 0)) {
+		ret = -EINVAL;
+		goto err_unmap_dev;
+	}
+
+	/*
+	 * Retrieve the label of this device. This will be the "name" attribute
+	 * of the corresponding "link" tag in the system's XML specification.
+	 */
+	ret = of_property_read_string(node, "label", &name);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to read label\n");
+		goto err_unmap_dev;
+	}
+
+	cdev_init(&priv->cdev, &link_shbuf_ops);
+	ret = cdev_add(&priv->cdev, link_shbuf_dev, 1);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to add char dev region\n");
+		goto err_unmap_dev;
+	}
+
+	ret = link_shbuf_allocate_device(&priv->devt);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to allocate new device number\n");
+		goto err_del_dev;
+	}
+
+	/* We're now ready to create the device itself. */
+	BUG_ON(name == NULL);
+	priv->dev = device_create(link_shbuf_class, &pdev->dev, priv->devt,
+		priv, "%s%s", DEV_PREFIX, name);
+	if (IS_ERR(priv->dev)) {
+		dev_err(&pdev->dev, "failed to create device\n");
+		ret = PTR_ERR(priv->dev);
+		goto err_del_dev;
+	}
+
+	dev_set_drvdata(&pdev->dev, priv);
+
+	return 0;
+
+err_del_dev:
+	cdev_del(&priv->cdev);
+err_unmap_dev:
+	devm_iounmap(&pdev->dev, priv->base);
+err_release_region:
+	devm_release_mem_region(&pdev->dev, priv->buffer.start,
+			resource_size(&priv->buffer));
+err_free_irq:
+	if (priv->virq != NO_INCOMING_IRQ)
+		devm_free_irq(&pdev->dev, priv->virq, priv);
+err_free_dev:
+	devm_kfree(&pdev->dev, priv);
+	return ret;
+}
+
+static int link_shbuf_remove(struct platform_device *pdev)
+{
+	struct link_shbuf_data *priv;
+
+	priv = dev_get_drvdata(&pdev->dev);
+	WARN_ON(!link_shbuf_data_invariant(priv));
+
+	device_destroy(link_shbuf_class, priv->devt);
+
+	cdev_del(&priv->cdev);
+
+	/*
+	 * None of the following is strictly required, as these are all managed
+	 * resources, but we clean it up anyway for clarity.
+	 */
+
+	devm_iounmap(&pdev->dev, priv->base);
+
+	devm_release_mem_region(&pdev->dev, priv->buffer.start,
+			resource_size(&priv->buffer));
+
+	if (priv->virq != NO_INCOMING_IRQ)
+		devm_free_irq(&pdev->dev, priv->virq, priv);
+
+	devm_kfree(&pdev->dev, priv);
+
+	return 0;
+}
+
+static struct platform_driver of_plat_link_shbuf_driver = {
+	.driver = {
+		.name = "okl4-shbuf",
+		.owner = THIS_MODULE,
+		.of_match_table = okl4_link_shbuf_match,
+	},
+	.probe = link_shbuf_probe,
+	.remove = link_shbuf_remove,
+};
+
+/* Maximum number of minor device numbers */
+enum {
+	MAX_MINOR = 1 << MINORBITS,
+};
+
+static int __init okl4_link_shbuf_init(void)
+{
+	int ret;
+
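+	/* Create the class under which all shared-buffer link devices appear. */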
+	link_shbuf_class = class_create(THIS_MODULE, DEVICE_NAME);
+	if (IS_ERR(link_shbuf_class)) {
+		pr_err("failed to create class\n");
+		ret = PTR_ERR(link_shbuf_class);
+		return ret;
+	}
+
+	ret = alloc_chrdev_region(&link_shbuf_dev, 0, MAX_MINOR, DEVICE_NAME);
+	if (ret < 0) {
+		pr_err("failed to allocate char dev region\n");
+		goto err_destroy_class;
+	}
+
+	ret = platform_driver_register(&of_plat_link_shbuf_driver);
+	if (ret < 0) {
+		pr_err("failed to register driver\n");
+		goto err_unregister_dev_region;
+	}
+
+	spin_lock_init(&device_number_allocate);
+
+	return 0;
+
+err_unregister_dev_region:
+	unregister_chrdev_region(link_shbuf_dev, MAX_MINOR);
+err_destroy_class:
+	class_destroy(link_shbuf_class);
+	return ret;
+}
+module_init(okl4_link_shbuf_init);
+
+static void __exit okl4_link_shbuf_exit(void)
+{
+	platform_driver_unregister(&of_plat_link_shbuf_driver);
+	unregister_chrdev_region(link_shbuf_dev, MAX_MINOR);
+	class_destroy(link_shbuf_class);
+}
+module_exit(okl4_link_shbuf_exit);
+
+MODULE_DESCRIPTION("OKL4 shared buffer link driver");
+MODULE_AUTHOR("Cog Systems Pty Ltd");
diff --git a/drivers/misc/tsl2550.c b/drivers/misc/tsl2550.c
index 87a1337..eb57610 100644
--- a/drivers/misc/tsl2550.c
+++ b/drivers/misc/tsl2550.c
@@ -177,7 +177,7 @@
 		} else
 			lux = 0;
 	else
-		return -EAGAIN;
+		return 0;
 
 	/* LUX range check */
 	return lux > TSL2550_MAX_LUX ? TSL2550_MAX_LUX : lux;
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 518e2de..5e9122cd 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -45,6 +45,7 @@
 #include <linux/seq_file.h>
 #include <linux/vmw_vmci_defs.h>
 #include <linux/vmw_vmci_api.h>
+#include <linux/io.h>
 #include <asm/hypervisor.h>
 
 MODULE_AUTHOR("VMware, Inc.");
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index f735ab4..5927db04 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -755,7 +755,7 @@
 	retval = get_user_pages_fast((uintptr_t) produce_uva,
 				     produce_q->kernel_if->num_pages, 1,
 				     produce_q->kernel_if->u.h.header_page);
-	if (retval < produce_q->kernel_if->num_pages) {
+	if (retval < (int)produce_q->kernel_if->num_pages) {
 		pr_debug("get_user_pages_fast(produce) failed (retval=%d)",
 			retval);
 		qp_release_pages(produce_q->kernel_if->u.h.header_page,
@@ -767,7 +767,7 @@
 	retval = get_user_pages_fast((uintptr_t) consume_uva,
 				     consume_q->kernel_if->num_pages, 1,
 				     consume_q->kernel_if->u.h.header_page);
-	if (retval < consume_q->kernel_if->num_pages) {
+	if (retval < (int)consume_q->kernel_if->num_pages) {
 		pr_debug("get_user_pages_fast(consume) failed (retval=%d)",
 			retval);
 		qp_release_pages(consume_q->kernel_if->u.h.header_page,
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 1119292..7b221c3 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -94,6 +94,7 @@
 #define CORE_DDR_DLL_LOCK	(1 << 11)
 
 #define CORE_CLK_PWRSAVE		(1 << 1)
+#define CORE_VNDR_SPEC_ADMA_ERR_SIZE_EN	(1 << 7)
 #define CORE_HC_MCLK_SEL_DFLT		(2 << 8)
 #define CORE_HC_MCLK_SEL_HS400		(3 << 8)
 #define CORE_HC_MCLK_SEL_MASK		(3 << 8)
@@ -4924,6 +4925,12 @@
 	writel_relaxed(CORE_VENDOR_SPEC_POR_VAL,
 	host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
 
+	/* This enables the ADMA error interrupt in case of a length mismatch */
+	writel_relaxed((readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC) |
+			CORE_VNDR_SPEC_ADMA_ERR_SIZE_EN),
+			host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
+
 	/*
 	 * Ensure SDHCI FIFO is enabled by disabling alternative FIFO
 	 */
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index f446c66..01c8b90 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -3289,7 +3289,7 @@
 	} else if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC)) {
 		host->mmc->err_stats[MMC_ERR_DAT_CRC]++;
 		return -EILSEQ;
-	} else if (intmask & MMC_ERR_ADMA) {
+	} else if (intmask & SDHCI_INT_ADMA_ERROR) {
 		host->mmc->err_stats[MMC_ERR_ADMA]++;
 		return -EIO;
 	}
diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c
index 2e46496..4e98e5a 100644
--- a/drivers/net/appletalk/ipddp.c
+++ b/drivers/net/appletalk/ipddp.c
@@ -284,8 +284,12 @@
                 case SIOCFINDIPDDPRT:
 			spin_lock_bh(&ipddp_route_lock);
 			rp = __ipddp_find_route(&rcp);
-			if (rp)
-				memcpy(&rcp2, rp, sizeof(rcp2));
+			if (rp) {
+				memset(&rcp2, 0, sizeof(rcp2));
+				rcp2.ip    = rp->ip;
+				rcp2.at    = rp->at;
+				rcp2.flags = rp->flags;
+			}
 			spin_unlock_bh(&ipddp_route_lock);
 
 			if (rp) {
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 8a5e0ae..b1ea29d 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -216,6 +216,7 @@
 static void bond_slave_arr_handler(struct work_struct *work);
 static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
 				  int mod);
+static void bond_netdev_notify_work(struct work_struct *work);
 
 /*---------------------------- General routines -----------------------------*/
 
@@ -1250,6 +1251,8 @@
 			return NULL;
 		}
 	}
+	INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);
+
 	return slave;
 }
 
@@ -1257,6 +1260,7 @@
 {
 	struct bonding *bond = bond_get_bond_by_slave(slave);
 
+	cancel_delayed_work_sync(&slave->notify_work);
 	if (BOND_MODE(bond) == BOND_MODE_8023AD)
 		kfree(SLAVE_AD_INFO(slave));
 
@@ -1278,39 +1282,26 @@
 	info->link_failure_count = slave->link_failure_count;
 }
 
-static void bond_netdev_notify(struct net_device *dev,
-			       struct netdev_bonding_info *info)
-{
-	rtnl_lock();
-	netdev_bonding_info_change(dev, info);
-	rtnl_unlock();
-}
-
 static void bond_netdev_notify_work(struct work_struct *_work)
 {
-	struct netdev_notify_work *w =
-		container_of(_work, struct netdev_notify_work, work.work);
+	struct slave *slave = container_of(_work, struct slave,
+					   notify_work.work);
 
-	bond_netdev_notify(w->dev, &w->bonding_info);
-	dev_put(w->dev);
-	kfree(w);
+	if (rtnl_trylock()) {
+		struct netdev_bonding_info binfo;
+
+		bond_fill_ifslave(slave, &binfo.slave);
+		bond_fill_ifbond(slave->bond, &binfo.master);
+		netdev_bonding_info_change(slave->dev, &binfo);
+		rtnl_unlock();
+	} else {
+		queue_delayed_work(slave->bond->wq, &slave->notify_work, 1);
+	}
 }
 
 void bond_queue_slave_event(struct slave *slave)
 {
-	struct bonding *bond = slave->bond;
-	struct netdev_notify_work *nnw = kzalloc(sizeof(*nnw), GFP_ATOMIC);
-
-	if (!nnw)
-		return;
-
-	dev_hold(slave->dev);
-	nnw->dev = slave->dev;
-	bond_fill_ifslave(slave, &nnw->bonding_info.slave);
-	bond_fill_ifbond(bond, &nnw->bonding_info.master);
-	INIT_DELAYED_WORK(&nnw->work, bond_netdev_notify_work);
-
-	queue_delayed_work(slave->bond->wq, &nnw->work, 0);
+	queue_delayed_work(slave->bond->wq, &slave->notify_work, 0);
 }
 
 void bond_lower_state_changed(struct slave *slave)
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 2ce7ae9..c2cd540 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -744,7 +744,6 @@
 static int bcm_sf2_sw_resume(struct dsa_switch *ds)
 {
 	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
-	unsigned int port;
 	int ret;
 
 	ret = bcm_sf2_sw_rst(priv);
@@ -756,12 +755,7 @@
 	if (priv->hw_params.num_gphy == 1)
 		bcm_sf2_gphy_enable_set(ds, true);
 
-	for (port = 0; port < DSA_MAX_PORTS; port++) {
-		if ((1 << port) & ds->enabled_port_mask)
-			bcm_sf2_port_setup(ds, port, NULL);
-		else if (dsa_is_cpu_port(ds, port))
-			bcm_sf2_imp_setup(ds, port);
-	}
+	ds->ops->setup(ds);
 
 	return 0;
 }
@@ -1135,10 +1129,10 @@
 {
 	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
 
-	/* Disable all ports and interrupts */
 	priv->wol_ports_mask = 0;
-	bcm_sf2_sw_suspend(priv->dev->ds);
 	dsa_unregister_switch(priv->dev->ds);
+	/* Disable all ports and interrupts */
+	bcm_sf2_sw_suspend(priv->dev->ds);
 	bcm_sf2_mdio_unregister(priv);
 
 	return 0;
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 0d9ce08..1d92e03 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -422,7 +422,7 @@
 		return -ENOMEM;
 	}
 
-	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE,
+	dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
 			   DMA_FROM_DEVICE);
 	if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
 		u64_stats_update_begin(&rx_ring->syncp);
@@ -439,7 +439,7 @@
 	rx_info->page_offset = 0;
 	ena_buf = &rx_info->ena_buf;
 	ena_buf->paddr = dma;
-	ena_buf->len = PAGE_SIZE;
+	ena_buf->len = ENA_PAGE_SIZE;
 
 	return 0;
 }
@@ -456,7 +456,7 @@
 		return;
 	}
 
-	dma_unmap_page(rx_ring->dev, ena_buf->paddr, PAGE_SIZE,
+	dma_unmap_page(rx_ring->dev, ena_buf->paddr, ENA_PAGE_SIZE,
 		       DMA_FROM_DEVICE);
 
 	__free_page(page);
@@ -849,10 +849,10 @@
 	do {
 		dma_unmap_page(rx_ring->dev,
 			       dma_unmap_addr(&rx_info->ena_buf, paddr),
-			       PAGE_SIZE, DMA_FROM_DEVICE);
+			       ENA_PAGE_SIZE, DMA_FROM_DEVICE);
 
 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
-				rx_info->page_offset, len, PAGE_SIZE);
+				rx_info->page_offset, len, ENA_PAGE_SIZE);
 
 		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
 			  "rx skb updated. len %d. data_len %d\n",
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index c5eaf76..008f2d5 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -321,4 +321,15 @@
 
 int ena_get_sset_count(struct net_device *netdev, int sset);
 
+/* The ENA buffer length field is 16 bits long, so when PAGE_SIZE == 64kB the
+ * driver passes 0.
+ * Since the max packet size the ENA handles is ~9kB, limit the buffer length
+ * to 16kB.
+ */
+#if PAGE_SIZE > SZ_16K
+#define ENA_PAGE_SIZE SZ_16K
+#else
+#define ENA_PAGE_SIZE PAGE_SIZE
+#endif
+
 #endif /* !(ENA_H) */
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 91fbba5..16dc9ac 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -828,14 +828,22 @@
 {
 	u32 reg;
 
-	/* Stop monitoring MPD interrupt */
-	intrl2_0_mask_set(priv, INTRL2_0_MPD);
-
 	/* Clear the MagicPacket detection logic */
 	reg = umac_readl(priv, UMAC_MPD_CTRL);
 	reg &= ~MPD_EN;
 	umac_writel(priv, reg, UMAC_MPD_CTRL);
 
+	reg = intrl2_0_readl(priv, INTRL2_CPU_STATUS);
+	if (reg & INTRL2_0_MPD)
+		netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");
+
+	if (reg & INTRL2_0_BRCM_MATCH_TAG) {
+		reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
+				  RXCHK_BRCM_TAG_MATCH_MASK;
+		netdev_info(priv->netdev,
+			    "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
+	}
+
 	netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
 }
 
@@ -868,11 +876,6 @@
 	if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
 		bcm_sysport_tx_reclaim_all(priv);
 
-	if (priv->irq0_stat & INTRL2_0_MPD) {
-		netdev_info(priv->netdev, "Wake-on-LAN interrupt!\n");
-		bcm_sysport_resume_from_wol(priv);
-	}
-
 	return IRQ_HANDLED;
 }
 
@@ -1901,9 +1904,6 @@
 	/* UniMAC receive needs to be turned on */
 	umac_enable_set(priv, CMD_RX_EN, 1);
 
-	/* Enable the interrupt wake-up source */
-	intrl2_0_mask_clear(priv, INTRL2_0_MPD);
-
 	netif_dbg(priv, wol, ndev, "entered WOL mode\n");
 
 	return 0;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 72297b7..208e9da 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1666,8 +1666,11 @@
 		if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
 			tx_pkts++;
 			/* return full budget so NAPI will complete. */
-			if (unlikely(tx_pkts > bp->tx_wake_thresh))
+			if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
 				rx_pkts = budget;
+				raw_cons = NEXT_RAW_CMP(raw_cons);
+				break;
+			}
 		} else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
 			rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event);
 			if (likely(rc >= 0))
@@ -1685,7 +1688,7 @@
 		}
 		raw_cons = NEXT_RAW_CMP(raw_cons);
 
-		if (rx_pkts == budget)
+		if (rx_pkts && rx_pkts == budget)
 			break;
 	}
 
@@ -1797,8 +1800,12 @@
 	while (1) {
 		work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
 
-		if (work_done >= budget)
+		if (work_done >= budget) {
+			if (!budget)
+				BNXT_CP_DB_REARM(cpr->cp_doorbell,
+						 cpr->cp_raw_cons);
 			break;
+		}
 
 		if (!bnxt_has_work(bp, cpr)) {
 			napi_complete(napi);
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index ec09fce..8f55c23 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -517,7 +517,7 @@
 		if (!(status & MACB_BIT(TGO)))
 			return 0;
 
-		usleep_range(10, 250);
+		udelay(250);
 	} while (time_before(halt_time, timeout));
 
 	return -ETIMEDOUT;
@@ -2861,6 +2861,13 @@
 	.init = macb_init,
 };
 
+static const struct macb_config sama5d3macb_config = {
+	.caps = MACB_CAPS_SG_DISABLED
+	      | MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
+	.clk_init = macb_clk_init,
+	.init = macb_init,
+};
+
 static const struct macb_config pc302gem_config = {
 	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
 	.dma_burst_length = 16,
@@ -2925,6 +2932,7 @@
 	{ .compatible = "cdns,gem", .data = &pc302gem_config },
 	{ .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
 	{ .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
+	{ .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config },
 	{ .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
 	{ .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
 	{ .compatible = "cdns,emac", .data = &emac_config },
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index b6ed818..06bc863 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -80,7 +80,7 @@
 	if (cb->type == DESC_TYPE_SKB)
 		dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
 				 ring_to_dma_dir(ring));
-	else
+	else if (cb->length)
 		dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
 			       ring_to_dma_dir(ring));
 }
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index e093cbf..f9d6845 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -213,10 +213,10 @@
 
 	/* priv data for the desc, e.g. skb when use with ip stack*/
 	void *priv;
-	u16 page_offset;
-	u16 reuse_flag;
+	u32 page_offset;
+	u32 length;     /* length of the buffer */
 
-	u16 length;     /* length of the buffer */
+	u16 reuse_flag;
 
        /* desc type, used by the ring user to mark the type of the priv data */
 	u16 type;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 111e1aa..92ed653 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -39,9 +39,9 @@
 #define SKB_TMP_LEN(SKB) \
 	(((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB))
 
-static void fill_v2_desc(struct hnae_ring *ring, void *priv,
-			 int size, dma_addr_t dma, int frag_end,
-			 int buf_num, enum hns_desc_type type, int mtu)
+static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size,
+			    int send_sz, dma_addr_t dma, int frag_end,
+			    int buf_num, enum hns_desc_type type, int mtu)
 {
 	struct hnae_desc *desc = &ring->desc[ring->next_to_use];
 	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
@@ -63,7 +63,7 @@
 	desc_cb->type = type;
 
 	desc->addr = cpu_to_le64(dma);
-	desc->tx.send_size = cpu_to_le16((u16)size);
+	desc->tx.send_size = cpu_to_le16((u16)send_sz);
 
 	/* config bd buffer end */
 	hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1);
@@ -132,6 +132,14 @@
 	ring_ptr_move_fw(ring, next_to_use);
 }
 
+static void fill_v2_desc(struct hnae_ring *ring, void *priv,
+			 int size, dma_addr_t dma, int frag_end,
+			 int buf_num, enum hns_desc_type type, int mtu)
+{
+	fill_v2_desc_hw(ring, priv, size, size, dma, frag_end,
+			buf_num, type, mtu);
+}
+
 static const struct acpi_device_id hns_enet_acpi_match[] = {
 	{ "HISI00C1", 0 },
 	{ "HISI00C2", 0 },
@@ -288,15 +296,15 @@
 
 	/* when the frag size is bigger than hardware, split this frag */
 	for (k = 0; k < frag_buf_num; k++)
-		fill_v2_desc(ring, priv,
-			     (k == frag_buf_num - 1) ?
+		fill_v2_desc_hw(ring, priv, k == 0 ? size : 0,
+				(k == frag_buf_num - 1) ?
 					sizeoflast : BD_MAX_SEND_SIZE,
-			     dma + BD_MAX_SEND_SIZE * k,
-			     frag_end && (k == frag_buf_num - 1) ? 1 : 0,
-			     buf_num,
-			     (type == DESC_TYPE_SKB && !k) ?
+				dma + BD_MAX_SEND_SIZE * k,
+				frag_end && (k == frag_buf_num - 1) ? 1 : 0,
+				buf_num,
+				(type == DESC_TYPE_SKB && !k) ?
 					DESC_TYPE_SKB : DESC_TYPE_PAGE,
-			     mtu);
+				mtu);
 }
 
 netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
@@ -529,7 +537,7 @@
 	}
 
 	skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
-			size - pull_len, truesize - pull_len);
+			size - pull_len, truesize);
 
 	 /* avoid re-using remote pages,flag default unreuse */
 	if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 6be0cae..4cd1633 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -243,7 +243,9 @@
 	}
 
 	if (h->dev->ops->adjust_link) {
+		netif_carrier_off(net_dev);
 		h->dev->ops->adjust_link(h, (int)speed, cmd->base.duplex);
+		netif_carrier_on(net_dev);
 		return 0;
 	}
 
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c
index 631dbc7..0988bf1 100644
--- a/drivers/net/ethernet/hp/hp100.c
+++ b/drivers/net/ethernet/hp/hp100.c
@@ -2636,7 +2636,7 @@
 		/* Wait for link to drop */
 		time = jiffies + (HZ / 10);
 		do {
-			if (~(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST))
+			if (!(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST))
 				break;
 			if (!in_interrupt())
 				schedule_timeout_interruptible(1);
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 975eeb8..e84574b 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -645,14 +645,14 @@
 		adapter->tx_ring = tx_old;
 		e1000_free_all_rx_resources(adapter);
 		e1000_free_all_tx_resources(adapter);
-		kfree(tx_old);
-		kfree(rx_old);
 		adapter->rx_ring = rxdr;
 		adapter->tx_ring = txdr;
 		err = e1000_up(adapter);
 		if (err)
 			goto err_setup;
 	}
+	kfree(tx_old);
+	kfree(rx_old);
 
 	clear_bit(__E1000_RESETTING, &adapter->flags);
 	return 0;
@@ -665,7 +665,8 @@
 err_alloc_rx:
 	kfree(txdr);
 err_alloc_tx:
-	e1000_up(adapter);
+	if (netif_running(adapter->netdev))
+		e1000_up(adapter);
 err_setup:
 	clear_bit(__E1000_RESETTING, &adapter->flags);
 	return err;
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 7e2ebfc..ff62dc7 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -29,6 +29,7 @@
 #include <linux/clk.h>
 #include <linux/hrtimer.h>
 #include <linux/ktime.h>
+#include <linux/if_vlan.h>
 #include <uapi/linux/ppp_defs.h>
 #include <net/ip.h>
 #include <net/ipv6.h>
@@ -4266,7 +4267,7 @@
 }
 
 /* Set Tx descriptors fields relevant for CSUM calculation */
-static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
+static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto,
 			       int ip_hdr_len, int l4_proto)
 {
 	u32 command;
@@ -5019,14 +5020,15 @@
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		int ip_hdr_len = 0;
 		u8 l4_proto;
+		__be16 l3_proto = vlan_get_protocol(skb);
 
-		if (skb->protocol == htons(ETH_P_IP)) {
+		if (l3_proto == htons(ETH_P_IP)) {
 			struct iphdr *ip4h = ip_hdr(skb);
 
 			/* Calculate IPv4 checksum and L4 checksum */
 			ip_hdr_len = ip4h->ihl;
 			l4_proto = ip4h->protocol;
-		} else if (skb->protocol == htons(ETH_P_IPV6)) {
+		} else if (l3_proto == htons(ETH_P_IPV6)) {
 			struct ipv6hdr *ip6h = ipv6_hdr(skb);
 
 			/* Read l4_protocol from one of IPv6 extra headers */
@@ -5038,7 +5040,7 @@
 		}
 
 		return mvpp2_txq_desc_csum(skb_network_offset(skb),
-				skb->protocol, ip_hdr_len, l4_proto);
+					   l3_proto, ip_hdr_len, l4_proto);
 	}
 
 	return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index a9dbc28..524fff2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -288,16 +288,17 @@
 		}
 }
 
-static u16 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
+static u32 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
 {
-	return (u16)((dev->pdev->bus->number << 8) |
+	return (u32)((pci_domain_nr(dev->pdev->bus) << 16) |
+		     (dev->pdev->bus->number << 8) |
 		     PCI_SLOT(dev->pdev->devfn));
 }
 
 /* Must be called with intf_mutex held */
 struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
 {
-	u16 pci_id = mlx5_gen_pci_id(dev);
+	u32 pci_id = mlx5_gen_pci_id(dev);
 	struct mlx5_core_dev *res = NULL;
 	struct mlx5_core_dev *tmp_dev;
 	struct mlx5_priv *priv;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index eaa242d..e175fcd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -97,18 +97,57 @@
 	return 0;
 }
 
+/* Maximum of 1 sec to wait for the SHMEM ready indication */
+#define QED_MCP_SHMEM_RDY_MAX_RETRIES	20
+#define QED_MCP_SHMEM_RDY_ITER_MS	50
+
 static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
 	struct qed_mcp_info *p_info = p_hwfn->mcp_info;
+	u8 cnt = QED_MCP_SHMEM_RDY_MAX_RETRIES;
+	u8 msec = QED_MCP_SHMEM_RDY_ITER_MS;
 	u32 drv_mb_offsize, mfw_mb_offsize;
 	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
 
 	p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
-	if (!p_info->public_base)
-		return 0;
+	if (!p_info->public_base) {
+		DP_NOTICE(p_hwfn,
+			  "The address of the MCP scratch-pad is not configured\n");
+		return -EINVAL;
+	}
 
 	p_info->public_base |= GRCBASE_MCP;
 
+	/* Get the MFW MB address and number of supported messages */
+	mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
+				SECTION_OFFSIZE_ADDR(p_info->public_base,
+						     PUBLIC_MFW_MB));
+	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
+	p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt,
+					    p_info->mfw_mb_addr +
+					    offsetof(struct public_mfw_mb,
+						     sup_msgs));
+
+	/* The driver can notify that there was an MCP reset, and might read the
+	 * SHMEM values before the MFW has completed initializing them.
+	 * To avoid this, the "sup_msgs" field in the MFW mailbox is used as a
+	 * data ready indication.
+	 */
+	while (!p_info->mfw_mb_length && --cnt) {
+		msleep(msec);
+		p_info->mfw_mb_length =
+			(u16)qed_rd(p_hwfn, p_ptt,
+				    p_info->mfw_mb_addr +
+				    offsetof(struct public_mfw_mb, sup_msgs));
+	}
+
+	if (!cnt) {
+		DP_NOTICE(p_hwfn,
+			  "Failed to get the SHMEM ready notification after %d msec\n",
+			  QED_MCP_SHMEM_RDY_MAX_RETRIES * msec);
+		return -EBUSY;
+	}
+
 	/* Calculate the driver and MFW mailbox address */
 	drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
 				SECTION_OFFSIZE_ADDR(p_info->public_base,
@@ -118,13 +157,6 @@
 		   "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
 		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
 
-	/* Set the MFW MB address */
-	mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
-				SECTION_OFFSIZE_ADDR(p_info->public_base,
-						     PUBLIC_MFW_MB));
-	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
-	p_info->mfw_mb_length =	(u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr);
-
 	/* Get the current driver mailbox sequence before sending
 	 * the first command
 	 */
@@ -1198,31 +1230,61 @@
 	return rc;
 }
 
+/* Maximum of 100 msec to wait for the MCP to halt */
+#define QED_MCP_HALT_SLEEP_MS		10
+#define QED_MCP_HALT_MAX_RETRIES	10
+
 int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-	u32 resp = 0, param = 0;
+	u32 resp = 0, param = 0, cpu_state, cnt = 0;
 	int rc;
 
 	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
 			 &param);
-	if (rc)
+	if (rc) {
 		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+		return rc;
+	}
 
-	return rc;
+	do {
+		msleep(QED_MCP_HALT_SLEEP_MS);
+		cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
+		if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
+			break;
+	} while (++cnt < QED_MCP_HALT_MAX_RETRIES);
+
+	if (cnt == QED_MCP_HALT_MAX_RETRIES) {
+		DP_NOTICE(p_hwfn,
+			  "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
+			  qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
+		return -EBUSY;
+	}
+
+	return 0;
 }
 
+#define QED_MCP_RESUME_SLEEP_MS	10
+
 int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-	u32 value, cpu_mode;
+	u32 cpu_mode, cpu_state;
 
 	qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
 
-	value = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
-	value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
-	qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
 	cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+	cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
+	qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);
+	msleep(QED_MCP_RESUME_SLEEP_MS);
+	cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
 
-	return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -EAGAIN : 0;
+	if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
+		DP_NOTICE(p_hwfn,
+			  "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
+			  cpu_mode, cpu_state);
+		return -EBUSY;
+	}
+
+	return 0;
 }
 
 int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index b414a05..56be1d6 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -510,6 +510,7 @@
 	0
 #define MCP_REG_CPU_STATE \
 	0xe05004UL
+#define MCP_REG_CPU_STATE_SOFT_HALTED	(0x1UL << 10)
 #define MCP_REG_CPU_EVENT_MASK \
 	0xe05008UL
 #define PGLUE_B_REG_PF_BAR0_SIZE \
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 49bad00..5ddadcd 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1800,7 +1800,8 @@
 	int (*config_loopback) (struct qlcnic_adapter *, u8);
 	int (*clear_loopback) (struct qlcnic_adapter *, u8);
 	int (*config_promisc_mode) (struct qlcnic_adapter *, u32);
-	void (*change_l2_filter) (struct qlcnic_adapter *, u64 *, u16);
+	void (*change_l2_filter)(struct qlcnic_adapter *adapter, u64 *addr,
+				 u16 vlan, struct qlcnic_host_tx_ring *tx_ring);
 	int (*get_board_info) (struct qlcnic_adapter *);
 	void (*set_mac_filter_count) (struct qlcnic_adapter *);
 	void (*free_mac_list) (struct qlcnic_adapter *);
@@ -2042,9 +2043,10 @@
 }
 
 static inline void qlcnic_change_filter(struct qlcnic_adapter *adapter,
-					u64 *addr, u16 id)
+					u64 *addr, u16 vlan,
+					struct qlcnic_host_tx_ring *tx_ring)
 {
-	adapter->ahw->hw_ops->change_l2_filter(adapter, addr, id);
+	adapter->ahw->hw_ops->change_l2_filter(adapter, addr, vlan, tx_ring);
 }
 
 static inline int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index c3c28f0..05d32e8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -2132,7 +2132,8 @@
 }
 
 void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
-				  u16 vlan_id)
+				  u16 vlan_id,
+				  struct qlcnic_host_tx_ring *tx_ring)
 {
 	u8 mac[ETH_ALEN];
 	memcpy(&mac, addr, ETH_ALEN);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 331ae2c..c8e012b 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -550,7 +550,8 @@
 int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *, u32);
 int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *, int);
 int qlcnic_83xx_config_rss(struct qlcnic_adapter *, int);
-void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *, u64 *, u16);
+void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
+				  u16 vlan, struct qlcnic_host_tx_ring *ring);
 int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info *);
 int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
 void qlcnic_83xx_initialize_nic(struct qlcnic_adapter *, int);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
index 4bb33af..56a3bd9 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -173,7 +173,8 @@
 			 struct net_device *netdev);
 void qlcnic_82xx_get_beacon_state(struct qlcnic_adapter *);
 void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter,
-			       u64 *uaddr, u16 vlan_id);
+			       u64 *uaddr, u16 vlan_id,
+			       struct qlcnic_host_tx_ring *tx_ring);
 int qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *,
 				     struct ethtool_coalesce *);
 int qlcnic_82xx_set_rx_coalesce(struct qlcnic_adapter *);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index fedd736..e361294 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -268,13 +268,12 @@
 }
 
 void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
-			       u16 vlan_id)
+			       u16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
 {
 	struct cmd_desc_type0 *hwdesc;
 	struct qlcnic_nic_req *req;
 	struct qlcnic_mac_req *mac_req;
 	struct qlcnic_vlan_req *vlan_req;
-	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
 	u32 producer;
 	u64 word;
 
@@ -301,7 +300,8 @@
 
 static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
 			       struct cmd_desc_type0 *first_desc,
-			       struct sk_buff *skb)
+			       struct sk_buff *skb,
+			       struct qlcnic_host_tx_ring *tx_ring)
 {
 	struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data);
 	struct ethhdr *phdr = (struct ethhdr *)(skb->data);
@@ -335,7 +335,7 @@
 		    tmp_fil->vlan_id == vlan_id) {
 			if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
 				qlcnic_change_filter(adapter, &src_addr,
-						     vlan_id);
+						     vlan_id, tx_ring);
 			tmp_fil->ftime = jiffies;
 			return;
 		}
@@ -350,7 +350,7 @@
 	if (!fil)
 		return;
 
-	qlcnic_change_filter(adapter, &src_addr, vlan_id);
+	qlcnic_change_filter(adapter, &src_addr, vlan_id, tx_ring);
 	fil->ftime = jiffies;
 	fil->vlan_id = vlan_id;
 	memcpy(fil->faddr, &src_addr, ETH_ALEN);
@@ -766,7 +766,7 @@
 	}
 
 	if (adapter->drv_mac_learn)
-		qlcnic_send_filter(adapter, first_desc, skb);
+		qlcnic_send_filter(adapter, first_desc, skb, tx_ring);
 
 	tx_ring->tx_stats.tx_bytes += skb->len;
 	tx_ring->tx_stats.xmit_called++;
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index f65e8cd..20f5c0c 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -760,7 +760,7 @@
 };
 
 enum rtl_flag {
-	RTL_FLAG_TASK_ENABLED,
+	RTL_FLAG_TASK_ENABLED = 0,
 	RTL_FLAG_TASK_SLOW_PENDING,
 	RTL_FLAG_TASK_RESET_PENDING,
 	RTL_FLAG_TASK_PHY_PENDING,
@@ -7637,7 +7637,8 @@
 	rtl8169_update_counters(dev);
 
 	rtl_lock_work(tp);
-	clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
+	/* Clear all task flags */
+	bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
 
 	rtl8169_down(dev);
 	rtl_unlock_work(tp);
@@ -7820,7 +7821,9 @@
 
 	rtl_lock_work(tp);
 	napi_disable(&tp->napi);
-	clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
+	/* Clear all task flags */
+	bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
+
 	rtl_unlock_work(tp);
 
 	rtl_pll_power_down(tp);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 890e4b0..2019e16 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -71,7 +71,7 @@
  * Description:
  * This function validates the number of Unicast address entries supported
  * by a particular Synopsys 10/100/1000 controller. The Synopsys controller
- * supports 1, 32, 64, or 128 Unicast filter entries for it's Unicast filter
+ * supports 1..32, 64, or 128 Unicast filter entries for its Unicast filter
  * logic. This function validates a valid, supported configuration is
  * selected, and defaults to 1 Unicast address if an unsupported
  * configuration is selected.
@@ -81,8 +81,7 @@
 	int x = ucast_entries;
 
 	switch (x) {
-	case 1:
-	case 32:
+	case 1 ... 32:
 	case 64:
 	case 128:
 		break;
diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c
index 2e5150b..7a14e81 100644
--- a/drivers/net/phy/xilinx_gmii2rgmii.c
+++ b/drivers/net/phy/xilinx_gmii2rgmii.c
@@ -40,8 +40,11 @@
 {
 	struct gmii2rgmii *priv = phydev->priv;
 	u16 val = 0;
+	int err;
 
-	priv->phy_drv->read_status(phydev);
+	err = priv->phy_drv->read_status(phydev);
+	if (err < 0)
+		return err;
 
 	val = mdiobus_read(phydev->mdio.bus, priv->addr, XILINX_GMII2RGMII_REG);
 	val &= ~XILINX_GMII2RGMII_SPEED_MASK;
@@ -81,6 +84,11 @@
 		return -EPROBE_DEFER;
 	}
 
+	if (!priv->phy_dev->drv) {
+		dev_info(dev, "Attached phy not ready\n");
+		return -EPROBE_DEFER;
+	}
+
 	priv->addr = mdiodev->addr;
 	priv->phy_drv = priv->phy_dev->drv;
 	memcpy(&priv->conv_phy_drv, priv->phy_dev->drv,
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index f9ec009..9670aa2 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1171,6 +1171,11 @@
 		return -EBUSY;
 	}
 
+	if (dev == port_dev) {
+		netdev_err(dev, "Cannot enslave team device to itself\n");
+		return -EINVAL;
+	}
+
 	if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
 	    vlan_uses_dev(dev)) {
 		netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n",
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 0d4440f..2b728cc 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -934,6 +934,7 @@
 	{QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)},	/* Olivetti Olicard 500 */
 	{QMI_FIXED_INTF(0x1e2d, 0x0060, 4)},	/* Cinterion PLxx */
 	{QMI_FIXED_INTF(0x1e2d, 0x0053, 4)},	/* Cinterion PHxx,PXxx */
+	{QMI_FIXED_INTF(0x1e2d, 0x0063, 10)},	/* Cinterion ALASxx (1 RmNet) */
 	{QMI_FIXED_INTF(0x1e2d, 0x0082, 4)},	/* Cinterion PHxx,PXxx (2 RmNet) */
 	{QMI_FIXED_INTF(0x1e2d, 0x0082, 5)},	/* Cinterion PHxx,PXxx (2 RmNet) */
 	{QMI_FIXED_INTF(0x1e2d, 0x0083, 4)},	/* Cinterion PHxx,PXxx (1 RmNet + USB Audio)*/
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 03d0401..8d3f938c 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -1518,6 +1518,7 @@
 {
 	struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
 	if (pdata) {
+		cancel_work_sync(&pdata->set_multicast);
 		netif_dbg(dev, ifdown, dev->net, "free pdata\n");
 		kfree(pdata);
 		pdata = NULL;
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 0dadc60..b106a06 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -1,6 +1,7 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
  * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -161,6 +162,8 @@
 void ath10k_debug_print_board_info(struct ath10k *ar)
 {
 	char boardinfo[100];
+	const struct firmware *board;
+	u32 crc;
 
 	if (ar->id.bmi_ids_valid)
 		scnprintf(boardinfo, sizeof(boardinfo), "%d:%d",
@@ -168,11 +171,16 @@
 	else
 		scnprintf(boardinfo, sizeof(boardinfo), "N/A");
 
+	board = ar->normal_mode_fw.board;
+	if (!IS_ERR_OR_NULL(board))
+		crc = crc32_le(0, board->data, board->size);
+	else
+		crc = 0;
+
 	ath10k_info(ar, "board_file api %d bmi_id %s crc32 %08x",
 		    ar->bd_api,
 		    boardinfo,
-		    crc32_le(0, ar->normal_mode_fw.board->data,
-			     ar->normal_mode_fw.board->size));
+		    crc);
 }
 
 void ath10k_debug_print_boot_info(struct ath10k *ar)
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index ba1fe61..a3c2180 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -214,11 +214,12 @@
 	spin_lock_bh(&htt->rx_ring.lock);
 	ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
 					      htt->rx_ring.fill_cnt));
-	spin_unlock_bh(&htt->rx_ring.lock);
 
 	if (ret)
 		ath10k_htt_rx_ring_free(htt);
 
+	spin_unlock_bh(&htt->rx_ring.lock);
+
 	return ret;
 }
 
@@ -230,7 +231,9 @@
 	skb_queue_purge(&htt->rx_in_ord_compl_q);
 	skb_queue_purge(&htt->tx_fetch_ind_q);
 
+	spin_lock_bh(&htt->rx_ring.lock);
 	ath10k_htt_rx_ring_free(htt);
+	spin_unlock_bh(&htt->rx_ring.lock);
 
 	dma_free_coherent(htt->ar->dev,
 			  (htt->rx_ring.size *
diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h
index e0d00ce..5b974bb 100644
--- a/drivers/net/wireless/ath/ath10k/trace.h
+++ b/drivers/net/wireless/ath/ath10k/trace.h
@@ -152,10 +152,9 @@
 );
 
 TRACE_EVENT(ath10k_wmi_cmd,
-	TP_PROTO(struct ath10k *ar, int id, const void *buf, size_t buf_len,
-		 int ret),
+	TP_PROTO(struct ath10k *ar, int id, const void *buf, size_t buf_len),
 
-	TP_ARGS(ar, id, buf, buf_len, ret),
+	TP_ARGS(ar, id, buf, buf_len),
 
 	TP_STRUCT__entry(
 		__string(device, dev_name(ar->dev))
@@ -163,7 +162,6 @@
 		__field(unsigned int, id)
 		__field(size_t, buf_len)
 		__dynamic_array(u8, buf, buf_len)
-		__field(int, ret)
 	),
 
 	TP_fast_assign(
@@ -171,17 +169,15 @@
 		__assign_str(driver, dev_driver_string(ar->dev));
 		__entry->id = id;
 		__entry->buf_len = buf_len;
-		__entry->ret = ret;
 		memcpy(__get_dynamic_array(buf), buf, buf_len);
 	),
 
 	TP_printk(
-		"%s %s id %d len %zu ret %d",
+		"%s %s id %d len %zu",
 		__get_str(driver),
 		__get_str(device),
 		__entry->id,
-		__entry->buf_len,
-		__entry->ret
+		__entry->buf_len
 	)
 );
 
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index f69b98f..642a441 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -1486,10 +1486,10 @@
 	bssid_len = arg->n_bssids * sizeof(struct wmi_mac_addr);
 	ie_len = roundup(arg->ie_len, 4);
 	len = (sizeof(*tlv) + sizeof(*cmd)) +
-	      (arg->n_channels ? sizeof(*tlv) + chan_len : 0) +
-	      (arg->n_ssids ? sizeof(*tlv) + ssid_len : 0) +
-	      (arg->n_bssids ? sizeof(*tlv) + bssid_len : 0) +
-	      (arg->ie_len ? sizeof(*tlv) + ie_len : 0);
+	      sizeof(*tlv) + chan_len +
+	      sizeof(*tlv) + ssid_len +
+	      sizeof(*tlv) + bssid_len +
+	      sizeof(*tlv) + ie_len;
 
 	skb = ath10k_wmi_alloc_skb(ar, len);
 	if (!skb)
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index e518b64..75f7a7b 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -1711,8 +1711,8 @@
 	cmd_hdr->cmd_id = __cpu_to_le32(cmd);
 
 	memset(skb_cb, 0, sizeof(*skb_cb));
+	trace_ath10k_wmi_cmd(ar, cmd_id, skb->data, skb->len);
 	ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
-	trace_ath10k_wmi_cmd(ar, cmd_id, skb->data, skb->len, ret);
 
 	if (ret)
 		goto err_pull;
diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c
index e711ab4..a1d8947 100644
--- a/drivers/net/wireless/cnss2/main.c
+++ b/drivers/net/wireless/cnss2/main.c
@@ -1037,6 +1037,41 @@
 }
 EXPORT_SYMBOL(cnss_force_fw_assert);
 
+int cnss_force_collect_rddm(struct device *dev)
+{
+	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+	int ret = 0;
+
+	if (!plat_priv) {
+		cnss_pr_err("plat_priv is NULL\n");
+		return -ENODEV;
+	}
+
+	if (plat_priv->device_id == QCA6174_DEVICE_ID) {
+		cnss_pr_info("Force collect rddm is not supported\n");
+		return -EOPNOTSUPP;
+	}
+
+	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
+		cnss_pr_info("Recovery is already in progress, ignore forced collect rddm\n");
+		return 0;
+	}
+
+	cnss_driver_event_post(plat_priv,
+			       CNSS_DRIVER_EVENT_FORCE_FW_ASSERT,
+			       0, NULL);
+
+	reinit_completion(&plat_priv->rddm_complete);
+	ret = wait_for_completion_timeout
+		(&plat_priv->rddm_complete,
+		 msecs_to_jiffies(CNSS_RDDM_TIMEOUT_MS));
+	if (!ret)
+		ret = -ETIMEDOUT;
+
+	return ret;
+}
+EXPORT_SYMBOL(cnss_force_collect_rddm);
+
 static int cnss_cold_boot_cal_start_hdlr(struct cnss_plat_data *plat_priv)
 {
 	int ret = 0;
@@ -1570,6 +1605,7 @@
 
 	init_completion(&plat_priv->power_up_complete);
 	init_completion(&plat_priv->cal_complete);
+	init_completion(&plat_priv->rddm_complete);
 	mutex_init(&plat_priv->dev_lock);
 
 	return 0;
@@ -1577,6 +1613,7 @@
 
 static void cnss_misc_deinit(struct cnss_plat_data *plat_priv)
 {
+	complete_all(&plat_priv->rddm_complete);
 	complete_all(&plat_priv->cal_complete);
 	complete_all(&plat_priv->power_up_complete);
 	device_init_wakeup(&plat_priv->plat_dev->dev, false);
diff --git a/drivers/net/wireless/cnss2/main.h b/drivers/net/wireless/cnss2/main.h
index dd14bbe..f18b08a 100644
--- a/drivers/net/wireless/cnss2/main.h
+++ b/drivers/net/wireless/cnss2/main.h
@@ -26,6 +26,7 @@
 #define MAX_NO_OF_MAC_ADDR		4
 #define QMI_WLFW_MAX_TIMESTAMP_LEN	32
 #define QMI_WLFW_MAX_NUM_MEM_SEG	32
+#define CNSS_RDDM_TIMEOUT_MS		20000
 
 #define CNSS_EVENT_SYNC   BIT(0)
 #define CNSS_EVENT_UNINTERRUPTIBLE BIT(1)
@@ -251,6 +252,7 @@
 	u8 *diag_reg_read_buf;
 	bool cal_done;
 	char firmware_name[13];
+	struct completion rddm_complete;
 };
 
 struct cnss_plat_data *cnss_get_plat_priv(struct platform_device *plat_dev);
diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c
index c974a1bf..df8f4ad 100644
--- a/drivers/net/wireless/cnss2/pci.c
+++ b/drivers/net/wireless/cnss2/pci.c
@@ -17,6 +17,7 @@
 #include <linux/of.h>
 #include <linux/pm_runtime.h>
 #include <linux/memblock.h>
+#include <linux/completion.h>
 #include <soc/qcom/ramdump.h>
 
 #include "main.h"
@@ -554,6 +555,11 @@
 		return;
 	}
 
+	if (test_bit(CNSS_MHI_RDDM_DONE, &plat_priv->driver_state)) {
+		cnss_pr_dbg("RDDM already collected, return\n");
+		return;
+	}
+
 	cnss_pci_collect_dump_info(pci_priv, true);
 }
 
@@ -1529,6 +1535,12 @@
 	if (!plat_priv)
 		return -ENODEV;
 
+	if (test_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state)) {
+		cnss_pr_err("RDDM already collected 0x%x, return\n",
+			    pci_priv->mhi_state);
+		return 0;
+	}
+
 	ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_TRIGGER_RDDM);
 	if (ret) {
 		cnss_pr_err("Failed to trigger RDDM, err = %d\n", ret);
@@ -1907,6 +1919,8 @@
 		return "RESUME";
 	case CNSS_MHI_TRIGGER_RDDM:
 		return "TRIGGER_RDDM";
+	case CNSS_MHI_RDDM_DONE:
+		return "RDDM_DONE";
 	default:
 		return "UNKNOWN";
 	}
@@ -1983,6 +1997,9 @@
 
 	if (dump_data->nentries > 0)
 		plat_priv->ramdump_info_v2.dump_data_valid = true;
+
+	cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RDDM_DONE);
+	complete(&plat_priv->rddm_complete);
 }
 
 void cnss_pci_clear_dump_info(struct cnss_pci_data *pci_priv)
@@ -2188,6 +2205,8 @@
 		    !test_bit(CNSS_MHI_TRIGGER_RDDM, &pci_priv->mhi_state))
 			return 0;
 		break;
+	case CNSS_MHI_RDDM_DONE:
+		return 0;
 	default:
 		cnss_pr_err("Unhandled MHI state: %s(%d)\n",
 			    cnss_mhi_state_to_str(mhi_state), mhi_state);
@@ -2216,6 +2235,7 @@
 	case CNSS_MHI_POWER_OFF:
 	case CNSS_MHI_FORCE_POWER_OFF:
 		clear_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
+		clear_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state);
 		break;
 	case CNSS_MHI_SUSPEND:
 		set_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state);
@@ -2225,6 +2245,9 @@
 		break;
 	case CNSS_MHI_TRIGGER_RDDM:
 		break;
+	case CNSS_MHI_RDDM_DONE:
+		set_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state);
+		break;
 	default:
 		cnss_pr_err("Unhandled MHI state (%d)\n", mhi_state);
 	}
@@ -2283,6 +2306,8 @@
 	case CNSS_MHI_TRIGGER_RDDM:
 		ret = mhi_force_rddm_mode(pci_priv->mhi_ctrl);
 		break;
+	case CNSS_MHI_RDDM_DONE:
+		break;
 	default:
 		cnss_pr_err("Unhandled MHI state (%d)\n", mhi_state);
 		ret = -EINVAL;
diff --git a/drivers/net/wireless/cnss2/pci.h b/drivers/net/wireless/cnss2/pci.h
index 6476ce1..32dd323 100644
--- a/drivers/net/wireless/cnss2/pci.h
+++ b/drivers/net/wireless/cnss2/pci.h
@@ -31,6 +31,7 @@
 	CNSS_MHI_RESUME,
 	CNSS_MHI_TRIGGER_RDDM,
 	CNSS_MHI_RDDM,
+	CNSS_MHI_RDDM_DONE,
 };
 
 struct cnss_msi_user {
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 95e9641..4bb36dc 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2569,9 +2569,6 @@
 				IEEE80211_VHT_CAP_SHORT_GI_80 |
 				IEEE80211_VHT_CAP_SHORT_GI_160 |
 				IEEE80211_VHT_CAP_TXSTBC |
-				IEEE80211_VHT_CAP_RXSTBC_1 |
-				IEEE80211_VHT_CAP_RXSTBC_2 |
-				IEEE80211_VHT_CAP_RXSTBC_3 |
 				IEEE80211_VHT_CAP_RXSTBC_4 |
 				IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
 			sband->vht_cap.vht_mcs.rx_mcs_map =
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index ae87b39..2e92872 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -2928,6 +2928,8 @@
 
 	while (buflen >= sizeof(*auth_req)) {
 		auth_req = (void *)buf;
+		if (buflen < le32_to_cpu(auth_req->length))
+			return;
 		type = "unknown";
 		flags = le32_to_cpu(auth_req->flags);
 		pairwise_error = false;
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 7f4da72..96f83f0 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -35,6 +35,7 @@
 #include "wl12xx_80211.h"
 #include "cmd.h"
 #include "event.h"
+#include "ps.h"
 #include "tx.h"
 #include "hw_ops.h"
 
@@ -191,6 +192,10 @@
 
 	timeout_time = jiffies + msecs_to_jiffies(WL1271_EVENT_TIMEOUT);
 
+	ret = wl1271_ps_elp_wakeup(wl);
+	if (ret < 0)
+		return ret;
+
 	do {
 		if (time_after(jiffies, timeout_time)) {
 			wl1271_debug(DEBUG_CMD, "timeout waiting for event %d",
@@ -222,6 +227,7 @@
 	} while (!event);
 
 out:
+	wl1271_ps_elp_sleep(wl);
 	kfree(events_vector);
 	return ret;
 }
diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c
index 3c4c58b..3b6fb5b 100644
--- a/drivers/net/xen-netback/hash.c
+++ b/drivers/net/xen-netback/hash.c
@@ -332,20 +332,22 @@
 u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
 			    u32 off)
 {
-	u32 *mapping = &vif->hash.mapping[off];
+	u32 *mapping = vif->hash.mapping;
 	struct gnttab_copy copy_op = {
 		.source.u.ref = gref,
 		.source.domid = vif->domid,
-		.dest.u.gmfn = virt_to_gfn(mapping),
 		.dest.domid = DOMID_SELF,
-		.dest.offset = xen_offset_in_page(mapping),
-		.len = len * sizeof(u32),
+		.len = len * sizeof(*mapping),
 		.flags = GNTCOPY_source_gref
 	};
 
-	if ((off + len > vif->hash.size) || copy_op.len > XEN_PAGE_SIZE)
+	if ((off + len < off) || (off + len > vif->hash.size) ||
+	    len > XEN_PAGE_SIZE / sizeof(*mapping))
 		return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
 
+	copy_op.dest.u.gmfn = virt_to_gfn(mapping + off);
+	copy_op.dest.offset = xen_offset_in_page(mapping + off);
+
 	while (len-- != 0)
 		if (mapping[off++] >= vif->num_queues)
 			return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 7f6af10..3c1adb3 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -906,7 +906,11 @@
 			BUG_ON(pull_to <= skb_headlen(skb));
 			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
 		}
-		BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
+		if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
+			queue->rx.rsp_cons = ++cons;
+			kfree_skb(nskb);
+			return ~0U;
+		}
 
 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
 				skb_frag_page(nfrag),
@@ -1043,6 +1047,8 @@
 		skb->len += rx->status;
 
 		i = xennet_fill_frags(queue, skb, &tmpq);
+		if (unlikely(i == ~0U))
+			goto err;
 
 		if (rx->flags & XEN_NETRXF_csum_blank)
 			skb->ip_summed = CHECKSUM_PARTIAL;
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 53bd325..2dfd877 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -65,6 +65,7 @@
 
 	struct nvmet_req	req;
 
+	bool			allocated;
 	u8			n_rdma;
 	u32			flags;
 	u32			invalidate_rkey;
@@ -167,11 +168,19 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&queue->rsps_lock, flags);
-	rsp = list_first_entry(&queue->free_rsps,
+	rsp = list_first_entry_or_null(&queue->free_rsps,
 				struct nvmet_rdma_rsp, free_list);
-	list_del(&rsp->free_list);
+	if (likely(rsp))
+		list_del(&rsp->free_list);
 	spin_unlock_irqrestore(&queue->rsps_lock, flags);
 
+	if (unlikely(!rsp)) {
+		rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
+		if (unlikely(!rsp))
+			return NULL;
+		rsp->allocated = true;
+	}
+
 	return rsp;
 }
 
@@ -180,6 +189,11 @@
 {
 	unsigned long flags;
 
+	if (rsp->allocated) {
+		kfree(rsp);
+		return;
+	}
+
 	spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
 	list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
 	spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
@@ -755,6 +769,15 @@
 
 	cmd->queue = queue;
 	rsp = nvmet_rdma_get_rsp(queue);
+	if (unlikely(!rsp)) {
+		/*
+		 * We get here only under memory pressure; silently drop and
+		 * have the host retry, as we can't even fail it.
+		 */
+		nvmet_rdma_post_recv(queue->dev, cmd);
+		return;
+	}
 	rsp->queue = queue;
 	rsp->cmd = cmd;
 	rsp->flags = 0;
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 90b5a89..0a1ebbb 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -548,6 +548,9 @@
 	struct of_phandle_args args;
 	int i, rc;
 
+	if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)
+		return;
+
 	np = of_find_node_by_path("/testcase-data/interrupts/interrupts0");
 	if (!np) {
 		pr_err("missing testcase data\n");
@@ -622,6 +625,9 @@
 	struct of_phandle_args args;
 	int i, rc;
 
+	if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)
+		return;
+
 	np = of_find_node_by_path("/testcase-data/interrupts/interrupts-extended0");
 	if (!np) {
 		pr_err("missing testcase data\n");
@@ -778,15 +784,19 @@
 	pdev = of_find_device_by_node(np);
 	unittest(pdev, "device 1 creation failed\n");
 
-	irq = platform_get_irq(pdev, 0);
-	unittest(irq == -EPROBE_DEFER, "device deferred probe failed - %d\n", irq);
+	if (!(of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)) {
+		irq = platform_get_irq(pdev, 0);
+		unittest(irq == -EPROBE_DEFER,
+			 "device deferred probe failed - %d\n", irq);
 
-	/* Test that a parsing failure does not return -EPROBE_DEFER */
-	np = of_find_node_by_path("/testcase-data/testcase-device2");
-	pdev = of_find_device_by_node(np);
-	unittest(pdev, "device 2 creation failed\n");
-	irq = platform_get_irq(pdev, 0);
-	unittest(irq < 0 && irq != -EPROBE_DEFER, "device parsing error failed - %d\n", irq);
+		/* Test that a parsing failure does not return -EPROBE_DEFER */
+		np = of_find_node_by_path("/testcase-data/testcase-device2");
+		pdev = of_find_device_by_node(np);
+		unittest(pdev, "device 2 creation failed\n");
+		irq = platform_get_irq(pdev, 0);
+		unittest(irq < 0 && irq != -EPROBE_DEFER,
+			 "device parsing error failed - %d\n", irq);
+	}
 
 	np = of_find_node_by_path("/testcase-data/platform-tests");
 	unittest(np, "No testcase data in device tree\n");
diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c
index 11bad82..1dbd09c 100644
--- a/drivers/pci/host/pci-aardvark.c
+++ b/drivers/pci/host/pci-aardvark.c
@@ -976,6 +976,7 @@
 		return -ENOMEM;
 	}
 
+	pci_bus_size_bridges(bus);
 	pci_bus_assign_resources(bus);
 
 	list_for_each_entry(child, &bus->children, node)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 6b3c5c4..ccbbd4c 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1114,12 +1114,12 @@
 EXPORT_SYMBOL(pci_save_state);
 
 static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
-				     u32 saved_val, int retry)
+				     u32 saved_val, int retry, bool force)
 {
 	u32 val;
 
 	pci_read_config_dword(pdev, offset, &val);
-	if (val == saved_val)
+	if (!force && val == saved_val)
 		return;
 
 	for (;;) {
@@ -1138,25 +1138,36 @@
 }
 
 static void pci_restore_config_space_range(struct pci_dev *pdev,
-					   int start, int end, int retry)
+					   int start, int end, int retry,
+					   bool force)
 {
 	int index;
 
 	for (index = end; index >= start; index--)
 		pci_restore_config_dword(pdev, 4 * index,
 					 pdev->saved_config_space[index],
-					 retry);
+					 retry, force);
 }
 
 static void pci_restore_config_space(struct pci_dev *pdev)
 {
 	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
-		pci_restore_config_space_range(pdev, 10, 15, 0);
+		pci_restore_config_space_range(pdev, 10, 15, 0, false);
 		/* Restore BARs before the command register. */
-		pci_restore_config_space_range(pdev, 4, 9, 10);
-		pci_restore_config_space_range(pdev, 0, 3, 0);
+		pci_restore_config_space_range(pdev, 4, 9, 10, false);
+		pci_restore_config_space_range(pdev, 0, 3, 0, false);
+	} else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
+		pci_restore_config_space_range(pdev, 12, 15, 0, false);
+
+		/*
+		 * Force rewriting of prefetch registers to avoid S3 resume
+		 * issues on Intel PCI bridges that occur when these
+		 * registers are not explicitly written.
+		 */
+		pci_restore_config_space_range(pdev, 9, 11, 0, true);
+		pci_restore_config_space_range(pdev, 0, 8, 0, false);
 	} else {
-		pci_restore_config_space_range(pdev, 0, 15, 0);
+		pci_restore_config_space_range(pdev, 0, 15, 0, false);
 	}
 }
 
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index a05d143..c7a695c 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -4236,11 +4236,6 @@
  *
  * 0x9d10-0x9d1b PCI Express Root port #{1-12}
  *
- * The 300 series chipset suffers from the same bug so include those root
- * ports here as well.
- *
- * 0xa32c-0xa343 PCI Express Root port #{0-24}
- *
  * [1] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html
  * [2] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html
  * [3] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-spec-update.html
@@ -4258,7 +4253,6 @@
 	case 0xa110 ... 0xa11f: case 0xa167 ... 0xa16a: /* Sunrise Point */
 	case 0xa290 ... 0xa29f: case 0xa2e7 ... 0xa2ee: /* Union Point */
 	case 0x9d10 ... 0x9d1b: /* 7th & 8th Gen Mobile */
-	case 0xa32c ... 0xa343:				/* 300 series */
 		return true;
 	}
 
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index aa29688..4cf3aba 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -629,6 +629,7 @@
 static void msm_gpio_irq_unmask(struct irq_data *d)
 {
 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	uint32_t irqtype = irqd_get_trigger_type(d);
 	struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
 	const struct msm_pingroup *g;
 	unsigned long flags;
@@ -638,6 +639,12 @@
 
 	spin_lock_irqsave(&pctrl->lock, flags);
 
+	if (irqtype & (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW)) {
+		val = readl_relaxed(pctrl->regs + g->intr_status_reg);
+		val &= ~BIT(g->intr_status_bit);
+		writel_relaxed(val, pctrl->regs + g->intr_status_reg);
+	}
+
 	val = readl(pctrl->regs + g->intr_cfg_reg);
 	val |= BIT(g->intr_enable_bit);
 	writel(val, pctrl->regs + g->intr_cfg_reg);
diff --git a/drivers/platform/msm/ep_pcie/ep_pcie_core.c b/drivers/platform/msm/ep_pcie/ep_pcie_core.c
index 5c3828b..8e21e85 100644
--- a/drivers/platform/msm/ep_pcie/ep_pcie_core.c
+++ b/drivers/platform/msm/ep_pcie/ep_pcie_core.c
@@ -1342,43 +1342,46 @@
 		}
 
 		dev->power_on = true;
+		/* check link status during initial bootup */
+		if (!dev->enumerated) {
+			val = readl_relaxed(dev->parf + PCIE20_PARF_PM_STTS);
+			val = val & PARF_XMLH_LINK_UP;
+			EP_PCIE_DBG(dev, "PCIe V%d: Link status is 0x%x.\n",
+					dev->rev, val);
+			if (val) {
+				EP_PCIE_INFO(dev,
+					"PCIe V%d: link initialized by bootloader for LE PCIe endpoint; skip link training in HLOS.\n",
+					dev->rev);
+				ep_pcie_core_init(dev, true);
+				dev->link_status = EP_PCIE_LINK_UP;
+				dev->l23_ready = false;
+				goto checkbme;
+			} else {
+				ltssm_en = readl_relaxed(dev->parf
+					+ PCIE20_PARF_LTSSM) & BIT(8);
+
+				if (ltssm_en) {
+					EP_PCIE_ERR(dev,
+						"PCIe V%d: link is not up even though LTSSM was already enabled by the bootloader.\n",
+						dev->rev);
+					ret = EP_PCIE_ERROR;
+					goto link_fail;
+				} else {
+					EP_PCIE_DBG(dev,
+						"PCIe V%d: Proceed with regular link training.\n",
+						dev->rev);
+				}
+			}
+		}
+
+		ret = ep_pcie_reset_init(dev);
+		if (ret)
+			goto link_fail;
 	}
 
 	if (!(opt & EP_PCIE_OPT_ENUM))
 		goto out;
 
-	/* check link status during initial bootup */
-	if (!dev->enumerated) {
-		val = readl_relaxed(dev->parf + PCIE20_PARF_PM_STTS);
-		val = val & PARF_XMLH_LINK_UP;
-		EP_PCIE_DBG(dev, "PCIe V%d: Link status is 0x%x.\n", dev->rev,
-				val);
-		if (val) {
-			EP_PCIE_INFO(dev,
-				"PCIe V%d: link initialized by bootloader for LE PCIe endpoint; skip link training in HLOS.\n",
-				dev->rev);
-			ep_pcie_core_init(dev, true);
-			dev->link_status = EP_PCIE_LINK_UP;
-			dev->l23_ready = false;
-			goto checkbme;
-		} else {
-			ltssm_en = readl_relaxed(dev->parf
-					+ PCIE20_PARF_LTSSM) & BIT(8);
-
-			if (ltssm_en) {
-				EP_PCIE_ERR(dev,
-					"PCIe V%d: link is not up when LTSSM has already enabled by bootloader.\n",
-					dev->rev);
-				ret = EP_PCIE_ERROR;
-				goto link_fail;
-			} else {
-				EP_PCIE_DBG(dev,
-					"PCIe V%d: Proceed with regular link training.\n",
-					dev->rev);
-			}
-		}
-	}
-
 	if (opt & EP_PCIE_OPT_AST_WAKE) {
 		/* assert PCIe WAKE# */
 		EP_PCIE_INFO(dev, "PCIe V%d: assert PCIe WAKE#.\n",
@@ -1430,9 +1433,6 @@
 		}
 	}
 
-	ret = ep_pcie_reset_init(dev);
-	if (ret)
-		goto link_fail;
 	/* init PCIe PHY */
 	ep_pcie_phy_init(dev);
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index 690d564..ec1d4d9 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -1771,7 +1771,7 @@
 
 	buff = kzalloc(buff_size, GFP_KERNEL);
 	if (buff == NULL)
-		return 0;
+		return -ENOMEM;
 
 	if (!ipa3_ctx->nat_mem.dev.is_dev_init) {
 		pos += scnprintf(buff + pos, buff_size - pos,
diff --git a/drivers/platform/msm/mhi_dev/mhi.c b/drivers/platform/msm/mhi_dev/mhi.c
index 3199b29..ade1f0c 100644
--- a/drivers/platform/msm/mhi_dev/mhi.c
+++ b/drivers/platform/msm/mhi_dev/mhi.c
@@ -1984,6 +1984,8 @@
 	int rc;
 
 	ch = handle->channel;
+	if (!ch)
+		return -EINVAL;
 
 	rc = ch->ring->rd_offset == ch->ring->wr_offset;
 
@@ -2749,11 +2751,7 @@
 			ring->ring_cache_dma_handle);
 	}
 
-	for (i = 0; i < mhi->cfg.channels; i++)
-		mutex_destroy(&mhi->ch[i].ch_lock);
-
 	devm_kfree(&pdev->dev, mhi->mmio_backup);
-	devm_kfree(&pdev->dev, mhi->ch);
 	devm_kfree(&pdev->dev, mhi->ring);
 
 	mhi_dev_sm_exit(mhi);
@@ -2781,14 +2779,20 @@
 	if (!mhi->ring)
 		return -ENOMEM;
 
-	mhi->ch = devm_kzalloc(&pdev->dev,
+	/*
+	 * mhi_init is also called during device reset, in
+	 * which case channel mem will already be allocated.
+	 */
+	if (!mhi->ch) {
+		mhi->ch = devm_kzalloc(&pdev->dev,
 			(sizeof(struct mhi_dev_channel) *
 			(mhi->cfg.channels)), GFP_KERNEL);
-	if (!mhi->ch)
-		return -ENOMEM;
+		if (!mhi->ch)
+			return -ENOMEM;
 
-	for (i = 0; i < mhi->cfg.channels; i++)
-		mutex_init(&mhi->ch[i].ch_lock);
+		for (i = 0; i < mhi->cfg.channels; i++)
+			mutex_init(&mhi->ch[i].ch_lock);
+	}
 
 	spin_lock_init(&mhi->lock);
 	mhi->mmio_backup = devm_kzalloc(&pdev->dev,
diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
index 0056294..fe41993 100644
--- a/drivers/platform/x86/alienware-wmi.c
+++ b/drivers/platform/x86/alienware-wmi.c
@@ -518,6 +518,7 @@
 		if (obj && obj->type == ACPI_TYPE_INTEGER)
 			*out_data = (u32) obj->integer.value;
 	}
+	kfree(output.pointer);
 	return status;
 
 }
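The alienware-wmi fix above plugs a leak: an output buffer requested with ACPI_ALLOCATE_BUFFER is allocated by ACPICA and owned by the caller, so it must be freed on every return path. A hedged sketch of that pattern follows; the GUID, method id and args value are placeholders, not the driver's actual values.

	/* Sketch only: caller owns output.pointer when ACPI_ALLOCATE_BUFFER is used */
	u32 args = 0;
	struct acpi_buffer input = { sizeof(args), &args };
	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
	acpi_status status;

	status = wmi_evaluate_method("01234567-89ab-cdef-0123-456789abcdef",
				     0, 1, &input, &output);
	if (ACPI_SUCCESS(status) && output.pointer) {
		/* consume the union acpi_object at output.pointer */
	}
	kfree(output.pointer);	/* kfree(NULL) is a no-op, so this is always safe */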
diff --git a/drivers/power/reset/vexpress-poweroff.c b/drivers/power/reset/vexpress-poweroff.c
index 102f95a..e9e749f 100644
--- a/drivers/power/reset/vexpress-poweroff.c
+++ b/drivers/power/reset/vexpress-poweroff.c
@@ -35,6 +35,7 @@
 }
 
 static struct device *vexpress_power_off_device;
+static atomic_t vexpress_restart_nb_refcnt = ATOMIC_INIT(0);
 
 static void vexpress_power_off(void)
 {
@@ -99,10 +100,13 @@
 	int err;
 
 	vexpress_restart_device = dev;
-	err = register_restart_handler(&vexpress_restart_nb);
-	if (err) {
-		dev_err(dev, "cannot register restart handler (err=%d)\n", err);
-		return err;
+	if (atomic_inc_return(&vexpress_restart_nb_refcnt) == 1) {
+		err = register_restart_handler(&vexpress_restart_nb);
+		if (err) {
+			dev_err(dev, "cannot register restart handler (err=%d)\n", err);
+			atomic_dec(&vexpress_restart_nb_refcnt);
+			return err;
+		}
 	}
 	device_create_file(dev, &dev_attr_active);
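The vexpress change above registers the restart handler only once even when several devices probe, tracked with an atomic refcount that is rolled back on failure. A minimal, self-contained userspace C sketch of that register-once pattern (all names here are invented for illustration):

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int handler_refcnt;

	/* stand-in for register_restart_handler(); returns 0 on success */
	static int register_handler(void) { puts("registered"); return 0; }

	static int probe_one(void)
	{
		/* only the first probe actually registers the handler */
		if (atomic_fetch_add(&handler_refcnt, 1) == 0) {
			if (register_handler() != 0) {
				atomic_fetch_sub(&handler_refcnt, 1);
				return -1;
			}
		}
		return 0;
	}

	int main(void)
	{
		probe_one();	/* registers */
		probe_one();	/* reuses the existing registration */
		return 0;
	}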
 
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index 077d237..77b6885 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -14,6 +14,7 @@
 #include <linux/types.h>
 #include <linux/init.h>
 #include <linux/slab.h>
+#include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/notifier.h>
 #include <linux/err.h>
@@ -141,8 +142,13 @@
 	struct power_supply *psy = container_of(work, struct power_supply,
 						deferred_register_work.work);
 
-	if (psy->dev.parent)
-		mutex_lock(&psy->dev.parent->mutex);
+	if (psy->dev.parent) {
+		while (!mutex_trylock(&psy->dev.parent->mutex)) {
+			if (psy->removing)
+				return;
+			msleep(10);
+		}
+	}
 
 	psy_register_cooler(psy->dev.parent, psy);
 	power_supply_changed(psy);
@@ -948,6 +954,7 @@
 void power_supply_unregister(struct power_supply *psy)
 {
 	WARN_ON(atomic_dec_return(&psy->use_cnt));
+	psy->removing = true;
 	cancel_work_sync(&psy->changed_work);
 	cancel_delayed_work_sync(&psy->deferred_register_work);
 	sysfs_remove_link(&psy->dev.kobj, "powers");
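The deferred-register change above avoids a teardown deadlock: instead of blocking on the parent mutex, the worker polls with a trylock and bails out once the supply is marked as removing. A rough userspace analogue of that pattern using pthreads (the names and sleep interval are illustrative):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <unistd.h>

	static pthread_mutex_t parent_lock = PTHREAD_MUTEX_INITIALIZER;
	static atomic_bool removing;

	/* deferred work: take the lock unless the device is going away */
	static void deferred_register(void)
	{
		while (pthread_mutex_trylock(&parent_lock) != 0) {
			if (removing)
				return;		/* give up; teardown holds the lock */
			usleep(10000);		/* ~10 ms, like the msleep(10) above */
		}
		/* ... registration work under parent_lock ... */
		pthread_mutex_unlock(&parent_lock);
	}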
diff --git a/drivers/power/supply/qcom/qg-soc.c b/drivers/power/supply/qcom/qg-soc.c
index 711bd2b..a4da904 100644
--- a/drivers/power/supply/qcom/qg-soc.c
+++ b/drivers/power/supply/qcom/qg-soc.c
@@ -53,7 +53,7 @@
 
 	chip->sys_soc = CAP(QG_MIN_SOC, QG_MAX_SOC, chip->sys_soc);
 
-	if (chip->sys_soc == QG_MIN_SOC) {
+	if (chip->sys_soc < 100) {
 		/* Hold SOC to 1% of VBAT has not dropped below cutoff */
 		rc = qg_get_battery_voltage(chip, &vbat_uv);
 		if (!rc && vbat_uv >= (vcutoff_uv + VBAT_LOW_HYST_UV))
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 1a25499..0d81304 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -4458,13 +4458,13 @@
 	    !rdev->desc->fixed_uV)
 		rdev->is_switch = true;
 
+	dev_set_drvdata(&rdev->dev, rdev);
 	ret = device_register(&rdev->dev);
 	if (ret != 0) {
 		put_device(&rdev->dev);
 		goto unset_supplies;
 	}
 
-	dev_set_drvdata(&rdev->dev, rdev);
 	rdev_init_debugfs(rdev);
 	rdev->proxy_consumer = regulator_proxy_consumer_register(dev,
 							config->of_node);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 258a728..a5e6030 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -23,6 +23,7 @@
 #include <linux/netdevice.h>
 #include <linux/netdev_features.h>
 #include <linux/skbuff.h>
+#include <linux/vmalloc.h>
 
 #include <net/iucv/af_iucv.h>
 #include <net/dsfield.h>
@@ -4715,7 +4716,7 @@
 
 	priv.buffer_len = oat_data.buffer_len;
 	priv.response_len = 0;
-	priv.buffer =  kzalloc(oat_data.buffer_len, GFP_KERNEL);
+	priv.buffer = vzalloc(oat_data.buffer_len);
 	if (!priv.buffer) {
 		rc = -ENOMEM;
 		goto out;
@@ -4756,7 +4757,7 @@
 			rc = -EFAULT;
 
 out_free:
-	kfree(priv.buffer);
+	vfree(priv.buffer);
 out:
 	return rc;
 }
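The qeth change above swaps kzalloc() for vzalloc() because the buffer length comes from user space and can be large enough that a physically contiguous allocation fails. A minimal kernel-style sketch of the pairing, assuming only the standard vmalloc API (the helper names are hypothetical):

	#include <linux/vmalloc.h>

	/* Sketch: large, user-sized buffers only need to be virtually
	 * contiguous, so vzalloc() avoids high-order page allocations. */
	static void *alloc_reply(size_t len)
	{
		return vzalloc(len);	/* zeroed, may sleep */
	}

	static void free_reply(void *buf)
	{
		vfree(buf);		/* must pair with vzalloc()/vmalloc() */
	}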
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index e94e957..58404e6 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -491,7 +491,7 @@
 		default:
 			dev_kfree_skb_any(skb);
 			QETH_CARD_TEXT(card, 3, "inbunkno");
-			QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
+			QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr));
 			continue;
 		}
 		work_done++;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 4ca161b..efefe07 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1836,7 +1836,7 @@
 		default:
 			dev_kfree_skb_any(skb);
 			QETH_CARD_TEXT(card, 3, "inbunkno");
-			QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
+			QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr));
 			continue;
 		}
 		work_done++;
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 42921db..4ca1050 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -2742,6 +2742,8 @@
 					      BNX2X_DOORBELL_PCI_BAR);
 		reg_off = (1 << BNX2X_DB_SHIFT) * (cid_num & 0x1FFFF);
 		ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4);
+		if (!ep->qp.ctx_base)
+			return -ENOMEM;
 		goto arm_cq;
 	}
 
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index d9534ee..e173022 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -93,7 +93,7 @@
 static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2;
 static int fast_fail = 1;
 static int client_reserve = 1;
-static char partition_name[97] = "UNKNOWN";
+static char partition_name[96] = "UNKNOWN";
 static unsigned int partition_number = -1;
 
 static struct scsi_transport_template *ibmvscsi_transport_template;
@@ -259,7 +259,7 @@
 
 	ppartition_name = of_get_property(of_root, "ibm,partition-name", NULL);
 	if (ppartition_name)
-		strncpy(partition_name, ppartition_name,
+		strlcpy(partition_name, ppartition_name,
 				sizeof(partition_name));
 	p_number_ptr = of_get_property(of_root, "ibm,partition-no", NULL);
 	if (p_number_ptr)
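The ibmvscsi change above matters because strncpy() does not NUL-terminate when the source fills the destination, while strlcpy() always terminates (truncating if needed). A small standalone C demonstration; bounded_copy is a local stand-in for strlcpy, which is not in the C library:

	#include <stdio.h>
	#include <string.h>

	/* minimal strlcpy-like helper: always NUL-terminates dst */
	static size_t bounded_copy(char *dst, const char *src, size_t size)
	{
		size_t len = strlen(src);

		if (size) {
			size_t n = len >= size ? size - 1 : len;

			memcpy(dst, src, n);
			dst[n] = '\0';
		}
		return len;
	}

	int main(void)
	{
		char a[8], b[8];

		strncpy(a, "12345678", sizeof(a));	/* a is NOT NUL-terminated */
		bounded_copy(b, "12345678", sizeof(b));	/* b == "1234567"          */
		printf("%.8s / %s\n", a, b);
		return 0;
	}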
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 35cbd36..090fdcd 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -6193,6 +6193,9 @@
 			goto fail_init_mfi;
 	}
 
+	if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS)
+		goto fail_init_mfi;
+
 	tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
 		     (unsigned long)instance);
 
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 0824a81..07ea4fc 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -440,8 +440,8 @@
 static inline int fcpcmd_is_corrupted(struct atio *atio)
 {
 	if (atio->entry_type == ATIO_TYPE7 &&
-	    (le16_to_cpu(atio->attr_n_length & FCP_CMD_LENGTH_MASK) <
-	    FCP_CMD_LENGTH_MIN))
+	    ((le16_to_cpu(atio->attr_n_length) & FCP_CMD_LENGTH_MASK) <
+	     FCP_CMD_LENGTH_MIN))
 		return 1;
 	else
 		return 0;
diff --git a/drivers/soc/qcom/bg_rsb.c b/drivers/soc/qcom/bg_rsb.c
index 1b5830a..e653139 100644
--- a/drivers/soc/qcom/bg_rsb.c
+++ b/drivers/soc/qcom/bg_rsb.c
@@ -35,8 +35,8 @@
 #define BGRSB_MSG_SIZE 0x08
 #define TIMEOUT_MS 2000
 
-#define BGRSB_LDO15_VTG_MIN_UV 3300000
-#define BGRSB_LDO15_VTG_MAX_UV 3300000
+#define BGRSB_LDO15_VTG_MIN_UV 3000000
+#define BGRSB_LDO15_VTG_MAX_UV 3000000
 
 #define BGRSB_LDO11_VTG_MIN_UV 1800000
 #define BGRSB_LDO11_VTG_MAX_UV 1800000
@@ -135,6 +135,8 @@
 
 	bool calibration_needed;
 	bool is_calibrd;
+
+	bool is_cnfgrd;
 };
 
 static void *bgrsb_drv;
@@ -415,6 +417,7 @@
 			pr_err("Failed to unvote LDO-11 on BG down\n");
 	}
 
+	dev->is_cnfgrd = false;
 	pr_info("RSB current state is : %d\n", dev->bgrsb_current_state);
 
 	if (dev->bgrsb_current_state == BGRSB_STATE_INIT) {
@@ -452,6 +455,9 @@
 		else
 			pr_err("Failed to unvote LDO-11 on BG Glink down\n");
 	}
+
+	dev->is_cnfgrd = false;
+
 	if (dev->handle)
 		glink_close(dev->handle);
 	dev->handle = NULL;
@@ -562,6 +568,8 @@
 				dev->bgrsb_current_state = BGRSB_STATE_INIT;
 			return;
 		}
+
+		dev->is_cnfgrd = true;
 		dev->bgrsb_current_state = BGRSB_STATE_RSB_CONFIGURED;
 		pr_debug("RSB Cofigured\n");
 	}
@@ -592,6 +600,7 @@
 				dev->bgrsb_current_state = BGRSB_STATE_INIT;
 			return;
 		}
+		dev->is_cnfgrd = true;
 		dev->bgrsb_current_state = BGRSB_STATE_RSB_CONFIGURED;
 		pr_debug("Glink RSB Cofigured\n");
 	}
@@ -715,6 +724,11 @@
 			container_of(work, struct bgrsb_priv,
 							rsb_calibration_work);
 
+	if (!dev->is_cnfgrd) {
+		pr_err("RSB is not configured\n");
+		return;
+	}
+
 	req.cmd_id = 0x03;
 	req.data = dev->calbrtion_cpi;
 
@@ -744,6 +758,11 @@
 			container_of(work, struct bgrsb_priv,
 							bttn_configr_work);
 
+	if (!dev->is_cnfgrd) {
+		pr_err("RSB is not configured\n");
+		return;
+	}
+
 	req.cmd_id = 0x05;
 	req.data = dev->bttn_configs;
 
@@ -993,7 +1012,8 @@
 		goto ret_success;
 
 	if (dev->bgrsb_current_state == BGRSB_STATE_INIT) {
-		if (bgrsb_ldo_work(dev, BGRSB_ENABLE_LDO11) == 0) {
+		if (dev->is_cnfgrd &&
+			bgrsb_ldo_work(dev, BGRSB_ENABLE_LDO11) == 0) {
 			dev->bgrsb_current_state = BGRSB_STATE_RSB_CONFIGURED;
 			pr_debug("RSB Cofigured\n");
 			goto ret_success;
diff --git a/drivers/soc/qcom/bgcom_spi.c b/drivers/soc/qcom/bgcom_spi.c
index c6fc2db..b13f06d 100644
--- a/drivers/soc/qcom/bgcom_spi.c
+++ b/drivers/soc/qcom/bgcom_spi.c
@@ -464,7 +464,6 @@
 int bgcom_ahb_read(void *handle, uint32_t ahb_start_addr,
 	uint32_t num_words, void *read_buf)
 {
-	dma_addr_t dma_hndl_tx, dma_hndl_rx;
 	uint32_t txn_len;
 	uint8_t *tx_buf;
 	uint8_t *rx_buf;
@@ -472,7 +471,6 @@
 	int ret;
 	uint8_t cmnd = 0;
 	uint32_t ahb_addr = 0;
-	struct spi_device *spi = get_spi_device();
 
 	if (!handle || !read_buf || num_words == 0
 		|| num_words > BG_SPI_MAX_WORDS) {
@@ -495,16 +493,13 @@
 	size = num_words*BG_SPI_WORD_SIZE;
 	txn_len = BG_SPI_AHB_READ_CMD_LEN + size;
 
-
-	tx_buf = dma_zalloc_coherent(&spi->dev, txn_len,
-					&dma_hndl_tx, GFP_KERNEL);
+	tx_buf = kzalloc(txn_len, GFP_KERNEL);
 	if (!tx_buf)
 		return -ENOMEM;
 
-	rx_buf = dma_zalloc_coherent(&spi->dev, txn_len,
-					&dma_hndl_rx, GFP_KERNEL);
+	rx_buf = kzalloc(txn_len, GFP_KERNEL);
 	if (!rx_buf) {
-		dma_free_coherent(&spi->dev, txn_len, tx_buf, dma_hndl_tx);
+		kfree(tx_buf);
 		return -ENOMEM;
 	}
 
@@ -519,8 +514,8 @@
 	if (!ret)
 		memcpy(read_buf, rx_buf+BG_SPI_AHB_READ_CMD_LEN, size);
 
-	dma_free_coherent(&spi->dev, txn_len, tx_buf, dma_hndl_tx);
-	dma_free_coherent(&spi->dev, txn_len, rx_buf, dma_hndl_rx);
+	kfree(tx_buf);
+	kfree(rx_buf);
 	return ret;
 }
 EXPORT_SYMBOL(bgcom_ahb_read);
@@ -557,7 +552,6 @@
 		return -EBUSY;
 	}
 
-
 	mutex_lock(&cma_buffer_lock);
 	size = num_words*BG_SPI_WORD_SIZE;
 	txn_len = BG_SPI_AHB_CMD_LEN + size;
diff --git a/drivers/soc/qcom/sysmon-qmi.c b/drivers/soc/qcom/sysmon-qmi.c
index f4c7779..ea4b5a5 100644
--- a/drivers/soc/qcom/sysmon-qmi.c
+++ b/drivers/soc/qcom/sysmon-qmi.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -88,6 +88,7 @@
 static void sysmon_clnt_svc_exit(struct work_struct *work);
 
 static const int notif_map[SUBSYS_NOTIF_TYPE_COUNT] = {
+	[0 ... SUBSYS_NOTIF_TYPE_COUNT - 1] = SSCTL_SSR_EVENT_INVALID,
 	[SUBSYS_BEFORE_POWERUP] = SSCTL_SSR_EVENT_BEFORE_POWERUP,
 	[SUBSYS_AFTER_POWERUP] = SSCTL_SSR_EVENT_AFTER_POWERUP,
 	[SUBSYS_BEFORE_SHUTDOWN] = SSCTL_SSR_EVENT_BEFORE_SHUTDOWN,
@@ -147,6 +148,11 @@
 	}
 }
 
+static bool is_ssctl_event(enum subsys_notif_type notif)
+{
+	return notif_map[notif] != SSCTL_SSR_EVENT_INVALID;
+}
+
 static void sysmon_clnt_svc_arrive(struct work_struct *work)
 {
 	int rc;
@@ -318,8 +324,8 @@
 	const char *dest_ss = dest_desc->name;
 	int ret;
 
-	if (notif < 0 || notif >= SUBSYS_NOTIF_TYPE_COUNT || event_ss == NULL
-		|| dest_ss == NULL)
+	if (notif < 0 || notif >= SUBSYS_NOTIF_TYPE_COUNT ||
+	    !is_ssctl_event(notif) || event_ss == NULL || dest_ss == NULL)
 		return -EINVAL;
 
 	mutex_lock(&sysmon_list_lock);
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index a816f07..093c9cf 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -597,11 +597,13 @@
 
 	ret = wait_event_interruptible_timeout(rspi->wait,
 					       rspi->dma_callbacked, HZ);
-	if (ret > 0 && rspi->dma_callbacked)
+	if (ret > 0 && rspi->dma_callbacked) {
 		ret = 0;
-	else if (!ret) {
-		dev_err(&rspi->master->dev, "DMA timeout\n");
-		ret = -ETIMEDOUT;
+	} else {
+		if (!ret) {
+			dev_err(&rspi->master->dev, "DMA timeout\n");
+			ret = -ETIMEDOUT;
+		}
 		if (tx)
 			dmaengine_terminate_all(rspi->master->dma_tx);
 		if (rx)
@@ -1313,12 +1315,36 @@
 
 MODULE_DEVICE_TABLE(platform, spi_driver_ids);
 
+#ifdef CONFIG_PM_SLEEP
+static int rspi_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct rspi_data *rspi = platform_get_drvdata(pdev);
+
+	return spi_master_suspend(rspi->master);
+}
+
+static int rspi_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct rspi_data *rspi = platform_get_drvdata(pdev);
+
+	return spi_master_resume(rspi->master);
+}
+
+static SIMPLE_DEV_PM_OPS(rspi_pm_ops, rspi_suspend, rspi_resume);
+#define DEV_PM_OPS	&rspi_pm_ops
+#else
+#define DEV_PM_OPS	NULL
+#endif /* CONFIG_PM_SLEEP */
+
 static struct platform_driver rspi_driver = {
 	.probe =	rspi_probe,
 	.remove =	rspi_remove,
 	.id_table =	spi_driver_ids,
 	.driver		= {
 		.name = "renesas_spi",
+		.pm = DEV_PM_OPS,
 		.of_match_table = of_match_ptr(rspi_of_match),
 	},
 };
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index cbf02eb..711ea52 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -373,7 +373,8 @@
 
 static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p)
 {
-	sh_msiof_write(p, STR, sh_msiof_read(p, STR));
+	sh_msiof_write(p, STR,
+		       sh_msiof_read(p, STR) & ~(STR_TDREQ | STR_RDREQ));
 }
 
 static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p,
@@ -1275,12 +1276,37 @@
 };
 MODULE_DEVICE_TABLE(platform, spi_driver_ids);
 
+#ifdef CONFIG_PM_SLEEP
+static int sh_msiof_spi_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev);
+
+	return spi_master_suspend(p->master);
+}
+
+static int sh_msiof_spi_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev);
+
+	return spi_master_resume(p->master);
+}
+
+static SIMPLE_DEV_PM_OPS(sh_msiof_spi_pm_ops, sh_msiof_spi_suspend,
+			 sh_msiof_spi_resume);
+#define DEV_PM_OPS	&sh_msiof_spi_pm_ops
+#else
+#define DEV_PM_OPS	NULL
+#endif /* CONFIG_PM_SLEEP */
+
 static struct platform_driver sh_msiof_spi_drv = {
 	.probe		= sh_msiof_spi_probe,
 	.remove		= sh_msiof_spi_remove,
 	.id_table	= spi_driver_ids,
 	.driver		= {
 		.name		= "spi_sh_msiof",
+		.pm		= DEV_PM_OPS,
 		.of_match_table = of_match_ptr(sh_msiof_match),
 	},
 };
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index 85c91f5..af2880d 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -1063,6 +1063,24 @@
 		goto exit_free_master;
 	}
 
+	/* disabled clock may cause interrupt storm upon request */
+	tspi->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(tspi->clk)) {
+		ret = PTR_ERR(tspi->clk);
+		dev_err(&pdev->dev, "Can not get clock %d\n", ret);
+		goto exit_free_master;
+	}
+	ret = clk_prepare(tspi->clk);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Clock prepare failed %d\n", ret);
+		goto exit_free_master;
+	}
+	ret = clk_enable(tspi->clk);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Clock enable failed %d\n", ret);
+		goto exit_free_master;
+	}
+
 	spi_irq = platform_get_irq(pdev, 0);
 	tspi->irq = spi_irq;
 	ret = request_threaded_irq(tspi->irq, tegra_slink_isr,
@@ -1071,14 +1089,7 @@
 	if (ret < 0) {
 		dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
 					tspi->irq);
-		goto exit_free_master;
-	}
-
-	tspi->clk = devm_clk_get(&pdev->dev, NULL);
-	if (IS_ERR(tspi->clk)) {
-		dev_err(&pdev->dev, "can not get clock\n");
-		ret = PTR_ERR(tspi->clk);
-		goto exit_free_irq;
+		goto exit_clk_disable;
 	}
 
 	tspi->rst = devm_reset_control_get(&pdev->dev, "spi");
@@ -1138,6 +1149,8 @@
 	tegra_slink_deinit_dma_param(tspi, true);
 exit_free_irq:
 	free_irq(spi_irq, tspi);
+exit_clk_disable:
+	clk_disable(tspi->clk);
 exit_free_master:
 	spi_master_put(master);
 	return ret;
@@ -1150,6 +1163,8 @@
 
 	free_irq(tspi->irq, tspi);
 
+	clk_disable(tspi->clk);
+
 	if (tspi->tx_dma_chan)
 		tegra_slink_deinit_dma_param(tspi, false);
 
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index f4ffac4..5af176b 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -383,6 +383,12 @@
 		goto out;
 	}
 
+	/* requested mapping size larger than object size */
+	if (vma->vm_end - vma->vm_start > PAGE_ALIGN(asma->size)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	/* requested protection bits must match our allowed protection mask */
 	if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask, 0)) &
 		     calc_vm_prot_bits(PROT_MASK, 0))) {
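The ashmem hunk above rejects mappings larger than the page-aligned object size. Below is a tiny self-contained check of the same comparison, with PAGE_ALIGN written out since this is not kernel code; the sizes are made up for illustration:

	#include <stdio.h>

	#define PAGE_SIZE 4096UL
	#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

	/* return 0 if a mapping of map_len bytes fits an object of obj_size bytes */
	static int check_map_size(unsigned long map_len, unsigned long obj_size)
	{
		return map_len > PAGE_ALIGN(obj_size) ? -1 : 0;
	}

	int main(void)
	{
		printf("%d\n", check_map_size(8192, 5000));	/*  0: 8192 == PAGE_ALIGN(5000) */
		printf("%d\n", check_map_size(12288, 5000));	/* -1: larger than the object   */
		return 0;
	}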
diff --git a/drivers/staging/rts5208/sd.c b/drivers/staging/rts5208/sd.c
index b0bbb36..9e63bdf 100644
--- a/drivers/staging/rts5208/sd.c
+++ b/drivers/staging/rts5208/sd.c
@@ -4976,7 +4976,7 @@
 			goto SD_Execute_Write_Cmd_Failed;
 		}
 
-		rtsx_write_register(chip, SD_BYTE_CNT_L, 0xFF, 0x00);
+		retval = rtsx_write_register(chip, SD_BYTE_CNT_L, 0xFF, 0x00);
 		if (retval != STATUS_SUCCESS) {
 			rtsx_trace(chip);
 			goto SD_Execute_Write_Cmd_Failed;
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 04d2b6e..80205f3 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1435,7 +1435,8 @@
 
 	sg_init_table(sg, ARRAY_SIZE(sg));
 	sg_set_buf(sg, buf, payload_length);
-	sg_set_buf(sg + 1, pad_bytes, padding);
+	if (padding)
+		sg_set_buf(sg + 1, pad_bytes, padding);
 
 	ahash_request_set_crypt(hash, sg, data_crc, payload_length + padding);
 
@@ -3949,10 +3950,14 @@
 static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
 {
 	int ret;
-	u8 buffer[ISCSI_HDR_LEN], opcode;
+	u8 *buffer, opcode;
 	u32 checksum = 0, digest = 0;
 	struct kvec iov;
 
+	buffer = kcalloc(ISCSI_HDR_LEN, sizeof(*buffer), GFP_KERNEL);
+	if (!buffer)
+		return;
+
 	while (!kthread_should_stop()) {
 		/*
 		 * Ensure that both TX and RX per connection kthreads
@@ -3960,7 +3965,6 @@
 		 */
 		iscsit_thread_check_cpumask(conn, current, 0);
 
-		memset(buffer, 0, ISCSI_HDR_LEN);
 		memset(&iov, 0, sizeof(struct kvec));
 
 		iov.iov_base	= buffer;
@@ -3969,7 +3973,7 @@
 		ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN);
 		if (ret != ISCSI_HDR_LEN) {
 			iscsit_rx_thread_wait_for_tcp(conn);
-			return;
+			break;
 		}
 
 		if (conn->conn_ops->HeaderDigest) {
@@ -3979,7 +3983,7 @@
 			ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN);
 			if (ret != ISCSI_CRC_LEN) {
 				iscsit_rx_thread_wait_for_tcp(conn);
-				return;
+				break;
 			}
 
 			iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
@@ -4003,7 +4007,7 @@
 		}
 
 		if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)
-			return;
+			break;
 
 		opcode = buffer[0] & ISCSI_OPCODE_MASK;
 
@@ -4014,13 +4018,15 @@
 			" while in Discovery Session, rejecting.\n", opcode);
 			iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
 					  buffer);
-			return;
+			break;
 		}
 
 		ret = iscsi_target_rx_opcode(conn, buffer);
 		if (ret < 0)
-			return;
+			break;
 	}
+
+	kfree(buffer);
 }
 
 int iscsi_target_rx_thread(void *arg)
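The iscsit_get_rx_pdu() rework above moves the header buffer to the heap, so every early return inside the loop becomes a break that falls through to a single kfree(). A compact userspace sketch of that single-exit cleanup pattern; the receive step is a placeholder, not the driver's rx_data():

	#include <stdlib.h>
	#include <string.h>

	#define HDR_LEN 48

	/* stand-in for the receive step: returns bytes received */
	static int recv_hdr(unsigned char *buf) { memset(buf, 0, HDR_LEN); return HDR_LEN; }

	static void rx_loop(int iterations)
	{
		unsigned char *buffer = calloc(HDR_LEN, 1);

		if (!buffer)
			return;

		while (iterations--) {
			if (recv_hdr(buffer) != HDR_LEN)
				break;		/* not return: buffer must be freed */
			/* ... process one PDU header ... */
		}

		free(buffer);			/* single exit point for cleanup */
	}

	int main(void) { rx_loop(3); return 0; }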
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index 98f75e5..f0d9730 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -26,27 +26,6 @@
 #include "iscsi_target_nego.h"
 #include "iscsi_target_auth.h"
 
-static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len)
-{
-	int j = DIV_ROUND_UP(len, 2), rc;
-
-	rc = hex2bin(dst, src, j);
-	if (rc < 0)
-		pr_debug("CHAP string contains non hex digit symbols\n");
-
-	dst[j] = '\0';
-	return j;
-}
-
-static void chap_binaryhex_to_asciihex(char *dst, char *src, int src_len)
-{
-	int i;
-
-	for (i = 0; i < src_len; i++) {
-		sprintf(&dst[i*2], "%02x", (int) src[i] & 0xff);
-	}
-}
-
 static void chap_gen_challenge(
 	struct iscsi_conn *conn,
 	int caller,
@@ -59,7 +38,7 @@
 	memset(challenge_asciihex, 0, CHAP_CHALLENGE_LENGTH * 2 + 1);
 
 	get_random_bytes(chap->challenge, CHAP_CHALLENGE_LENGTH);
-	chap_binaryhex_to_asciihex(challenge_asciihex, chap->challenge,
+	bin2hex(challenge_asciihex, chap->challenge,
 				CHAP_CHALLENGE_LENGTH);
 	/*
 	 * Set CHAP_C, and copy the generated challenge into c_str.
@@ -240,9 +219,16 @@
 		pr_err("Could not find CHAP_R.\n");
 		goto out;
 	}
+	if (strlen(chap_r) != MD5_SIGNATURE_SIZE * 2) {
+		pr_err("Malformed CHAP_R\n");
+		goto out;
+	}
+	if (hex2bin(client_digest, chap_r, MD5_SIGNATURE_SIZE) < 0) {
+		pr_err("Malformed CHAP_R\n");
+		goto out;
+	}
 
 	pr_debug("[server] Got CHAP_R=%s\n", chap_r);
-	chap_string_to_hex(client_digest, chap_r, strlen(chap_r));
 
 	tfm = crypto_alloc_shash("md5", 0, 0);
 	if (IS_ERR(tfm)) {
@@ -286,7 +272,7 @@
 		goto out;
 	}
 
-	chap_binaryhex_to_asciihex(response, server_digest, MD5_SIGNATURE_SIZE);
+	bin2hex(response, server_digest, MD5_SIGNATURE_SIZE);
 	pr_debug("[server] MD5 Server Digest: %s\n", response);
 
 	if (memcmp(server_digest, client_digest, MD5_SIGNATURE_SIZE) != 0) {
@@ -341,9 +327,7 @@
 		pr_err("Could not find CHAP_C.\n");
 		goto out;
 	}
-	pr_debug("[server] Got CHAP_C=%s\n", challenge);
-	challenge_len = chap_string_to_hex(challenge_binhex, challenge,
-				strlen(challenge));
+	challenge_len = DIV_ROUND_UP(strlen(challenge), 2);
 	if (!challenge_len) {
 		pr_err("Unable to convert incoming challenge\n");
 		goto out;
@@ -352,6 +336,11 @@
 		pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n");
 		goto out;
 	}
+	if (hex2bin(challenge_binhex, challenge, challenge_len) < 0) {
+		pr_err("Malformed CHAP_C\n");
+		goto out;
+	}
+	pr_debug("[server] Got CHAP_C=%s\n", challenge);
 	/*
 	 * During mutual authentication, the CHAP_C generated by the
 	 * initiator must not match the original CHAP_C generated by
@@ -405,7 +394,7 @@
 	/*
 	 * Convert response from binary hex to ascii hext.
 	 */
-	chap_binaryhex_to_asciihex(response, digest, MD5_SIGNATURE_SIZE);
+	bin2hex(response, digest, MD5_SIGNATURE_SIZE);
 	*nr_out_len += sprintf(nr_out_ptr + *nr_out_len, "CHAP_R=0x%s",
 			response);
 	*nr_out_len += 1;
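The CHAP change above validates the length of CHAP_R before converting it and checks the hex2bin() result instead of silently accepting malformed input. A standalone C sketch of that validate-then-convert step; DIGEST_LEN and the helper are illustrative, not the driver's code:

	#include <ctype.h>
	#include <stdio.h>
	#include <string.h>

	#define DIGEST_LEN 16	/* e.g. an MD5 digest */

	/* convert a hex string of exactly 2 * DIGEST_LEN chars; -1 on bad input */
	static int parse_digest(const char *hex, unsigned char *out)
	{
		size_t i;

		if (strlen(hex) != 2 * DIGEST_LEN)
			return -1;			/* malformed: wrong length */
		for (i = 0; i < DIGEST_LEN; i++) {
			if (!isxdigit((unsigned char)hex[2 * i]) ||
			    !isxdigit((unsigned char)hex[2 * i + 1]))
				return -1;		/* malformed: non-hex digit */
			sscanf(&hex[2 * i], "%2hhx", &out[i]);
		}
		return 0;
	}

	int main(void)
	{
		unsigned char d[DIGEST_LEN];

		printf("%d\n", parse_digest("00112233445566778899aabbccddeeff", d));	/*  0 */
		printf("%d\n", parse_digest("zz", d));					/* -1 */
		return 0;
	}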
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 63e1dcc..761b065 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -637,8 +637,7 @@
 		none = strstr(buf1, NONE);
 		if (none)
 			goto out;
-		strncat(buf1, ",", strlen(","));
-		strncat(buf1, NONE, strlen(NONE));
+		strlcat(buf1, "," NONE, sizeof(buf1));
 		if (iscsi_update_param_value(param, buf1) < 0)
 			return -EINVAL;
 	}
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 02f93f4..76e163e 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -339,7 +339,7 @@
 	unsigned long clipped_freq = ULONG_MAX, floor_freq = 0;
 	struct cpufreq_cooling_device *cpufreq_dev;
 
-	if (event != CPUFREQ_ADJUST)
+	if (event != CPUFREQ_INCOMPATIBLE)
 		return NOTIFY_DONE;
 
 	mutex_lock(&cooling_list_lock);
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index c662cd7..fe811d7 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -396,10 +396,13 @@
 
 	mutex_lock(&tz->lock);
 
-	if (mode == THERMAL_DEVICE_ENABLED)
+	if (mode == THERMAL_DEVICE_ENABLED) {
 		tz->polling_delay = data->polling_delay;
-	else
+		tz->passive_delay = data->passive_delay;
+	} else {
 		tz->polling_delay = 0;
+		tz->passive_delay = 0;
+	}
 
 	mutex_unlock(&tz->lock);
 
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index 9510305..741e966 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -41,6 +41,9 @@
 	  If unsure, say Y, or else you won't be able to do much with your new
 	  shiny Linux system :-)
 
+config TTY_FLUSH_LOCAL_ECHO
+	bool
+
 config CONSOLE_TRANSLATIONS
 	depends on VT
 	default y
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 0475f96..442a3130 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -128,6 +128,10 @@
 
 #define MASK(x) ((x) & (N_TTY_BUF_SIZE - 1))
 
+#if defined(CONFIG_TTY_FLUSH_LOCAL_ECHO)
+static void continue_process_echoes(struct work_struct *work);
+#endif
+
 static inline size_t read_cnt(struct n_tty_data *ldata)
 {
 	return ldata->read_head - ldata->read_tail;
@@ -751,6 +755,16 @@
 			tail++;
 	}
 
+#if defined(CONFIG_TTY_FLUSH_LOCAL_ECHO)
+	if (ldata->echo_commit != tail) {
+		if (!tty->delayed_work) {
+			INIT_DELAYED_WORK(&tty->echo_delayed_work, continue_process_echoes);
+			schedule_delayed_work(&tty->echo_delayed_work, 1);
+		}
+		tty->delayed_work = 1;
+	}
+#endif
+
  not_yet_stored:
 	ldata->echo_tail = tail;
 	return old_space - space;
@@ -817,6 +831,20 @@
 	mutex_unlock(&ldata->output_lock);
 }
 
+#if defined(CONFIG_TTY_FLUSH_LOCAL_ECHO)
+static void continue_process_echoes(struct work_struct *work)
+{
+	struct tty_struct *tty =
+		container_of(work, struct tty_struct, echo_delayed_work.work);
+	struct n_tty_data *ldata = tty->disc_data;
+
+	mutex_lock(&ldata->output_lock);
+	tty->delayed_work = 0;
+	__process_echoes(tty);
+	mutex_unlock(&ldata->output_lock);
+}
+#endif
+
 /**
  *	add_echo_byte	-	add a byte to the echo buffer
  *	@c: unicode byte to echo
diff --git a/drivers/tty/serial/8250/serial_cs.c b/drivers/tty/serial/8250/serial_cs.c
index 933c268..8106353 100644
--- a/drivers/tty/serial/8250/serial_cs.c
+++ b/drivers/tty/serial/8250/serial_cs.c
@@ -637,8 +637,10 @@
 	    (link->has_func_id) &&
 	    (link->socket->pcmcia_pfc == 0) &&
 	    ((link->func_id == CISTPL_FUNCID_MULTI) ||
-	     (link->func_id == CISTPL_FUNCID_SERIAL)))
-		pcmcia_loop_config(link, serial_check_for_multi, info);
+	     (link->func_id == CISTPL_FUNCID_SERIAL))) {
+		if (pcmcia_loop_config(link, serial_check_for_multi, info))
+			goto failed;
+	}
 
 	/*
 	 * Apply any multi-port quirk.
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
index d3e3d42..0040c29f 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
@@ -1068,8 +1068,8 @@
 	/* Get the address of the host memory buffer.
 	 */
 	bdp = pinfo->rx_cur;
-	while (bdp->cbd_sc & BD_SC_EMPTY)
-		;
+	if (bdp->cbd_sc & BD_SC_EMPTY)
+		return NO_POLL_CHAR;
 
 	/* If the buffer address is in the CPM DPRAM, don't
 	 * convert it.
@@ -1104,7 +1104,11 @@
 		poll_chars = 0;
 	}
 	if (poll_chars <= 0) {
-		poll_chars = poll_wait_key(poll_buf, pinfo);
+		int ret = poll_wait_key(poll_buf, pinfo);
+
+		if (ret == NO_POLL_CHAR)
+			return ret;
+		poll_chars = ret;
 		pollp = poll_buf;
 	}
 	poll_chars--;
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 937f5e1..e2ec049 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -833,7 +833,8 @@
 	struct circ_buf *ring = &sport->rx_ring;
 	int ret, nent;
 	int bits, baud;
-	struct tty_struct *tty = tty_port_tty_get(&sport->port.state->port);
+	struct tty_port *port = &sport->port.state->port;
+	struct tty_struct *tty = port->tty;
 	struct ktermios *termios = &tty->termios;
 
 	baud = tty_get_baud_rate(tty);
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index b24edf6..0d82be1 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -2197,6 +2197,14 @@
 				ret);
 			return ret;
 		}
+
+		ret = devm_request_irq(&pdev->dev, rtsirq, imx_rtsint, 0,
+				       dev_name(&pdev->dev), sport);
+		if (ret) {
+			dev_err(&pdev->dev, "failed to request rts irq: %d\n",
+				ret);
+			return ret;
+		}
 	} else {
 		ret = devm_request_irq(&pdev->dev, rxirq, imx_int, 0,
 				       dev_name(&pdev->dev), sport);
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index 45b57c2..401c983 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -327,8 +327,10 @@
 	if ((termios->c_cflag & CREAD) == 0)
 		port->ignore_status_mask |= STAT_RX_RDY | STAT_BRK_ERR;
 
-	if (old)
+	if (old) {
 		tty_termios_copy_hw(termios, old);
+		termios->c_cflag |= CS8;
+	}
 
 	baud = uart_get_baud_rate(port, termios, old, 0, 460800);
 	uart_update_timeout(port, termios->c_cflag, baud);
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 789c814..4305524 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1475,6 +1475,7 @@
 static int tty_reopen(struct tty_struct *tty)
 {
 	struct tty_driver *driver = tty->driver;
+	int retval;
 
 	if (driver->type == TTY_DRIVER_TYPE_PTY &&
 	    driver->subtype == PTY_TYPE_MASTER)
@@ -1488,10 +1489,14 @@
 
 	tty->count++;
 
-	if (!tty->ldisc)
-		return tty_ldisc_reinit(tty, tty->termios.c_line);
+	if (tty->ldisc)
+		return 0;
 
-	return 0;
+	retval = tty_ldisc_reinit(tty, tty->termios.c_line);
+	if (retval)
+		tty->count--;
+
+	return retval;
 }
 
 /**
@@ -1658,6 +1663,10 @@
 
 	put_pid(tty->pgrp);
 	put_pid(tty->session);
+#if defined(CONFIG_TTY_FLUSH_LOCAL_ECHO)
+	if (tty->echo_delayed_work.work.func)
+		cancel_delayed_work_sync(&tty->echo_delayed_work);
+#endif
 	free_tty_struct(tty);
 }
 
diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
index f62c598..638eb9b 100644
--- a/drivers/tty/vt/vt_ioctl.c
+++ b/drivers/tty/vt/vt_ioctl.c
@@ -31,6 +31,8 @@
 #include <asm/io.h>
 #include <asm/uaccess.h>
 
+#include <linux/nospec.h>
+
 #include <linux/kbd_kern.h>
 #include <linux/vt_kern.h>
 #include <linux/kbd_diacr.h>
@@ -703,6 +705,8 @@
 		if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES)
 			ret = -ENXIO;
 		else {
+			vsa.console = array_index_nospec(vsa.console,
+							 MAX_NR_CONSOLES + 1);
 			vsa.console--;
 			console_lock();
 			ret = vc_allocate(vsa.console);
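The vt_ioctl change above clamps the user-supplied console index after the bounds check so it cannot be used speculatively out of range. A hedged kernel-style sketch of the general pattern; the array and its size are hypothetical, only array_index_nospec() from <linux/nospec.h> is the real helper:

	#include <linux/nospec.h>

	#define NR_SLOTS 16

	struct slot;
	static struct slot *slots[NR_SLOTS];

	/* bounds-check, then clamp the index so a mispredicted branch cannot
	 * be used to read out of bounds under speculation (Spectre v1) */
	static struct slot *get_slot(unsigned int idx)
	{
		if (idx >= NR_SLOTS)
			return NULL;
		idx = array_index_nospec(idx, NR_SLOTS);
		return slots[idx];
	}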
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index adc0f78..9f00165 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -470,7 +470,7 @@
 
 	set_bit(WDM_RESPONDING, &desc->flags);
 	spin_unlock_irq(&desc->iuspin);
-	rv = usb_submit_urb(desc->response, GFP_ATOMIC);
+	rv = usb_submit_urb(desc->response, GFP_KERNEL);
 	spin_lock_irq(&desc->iuspin);
 	if (rv) {
 		dev_err(&desc->intf->dev,
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 893ebae..988240e 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -1450,10 +1450,13 @@
 	struct async *as = NULL;
 	struct usb_ctrlrequest *dr = NULL;
 	unsigned int u, totlen, isofrmlen;
-	int i, ret, is_in, num_sgs = 0, ifnum = -1;
+	int i, ret, num_sgs = 0, ifnum = -1;
 	int number_of_packets = 0;
 	unsigned int stream_id = 0;
 	void *buf;
+	bool is_in;
+	bool allow_short = false;
+	bool allow_zero = false;
 	unsigned long mask =	USBDEVFS_URB_SHORT_NOT_OK |
 				USBDEVFS_URB_BULK_CONTINUATION |
 				USBDEVFS_URB_NO_FSBR |
@@ -1487,6 +1490,8 @@
 	u = 0;
 	switch (uurb->type) {
 	case USBDEVFS_URB_TYPE_CONTROL:
+		if (is_in)
+			allow_short = true;
 		if (!usb_endpoint_xfer_control(&ep->desc))
 			return -EINVAL;
 		/* min 8 byte setup packet */
@@ -1527,6 +1532,10 @@
 		break;
 
 	case USBDEVFS_URB_TYPE_BULK:
+		if (!is_in)
+			allow_zero = true;
+		else
+			allow_short = true;
 		switch (usb_endpoint_type(&ep->desc)) {
 		case USB_ENDPOINT_XFER_CONTROL:
 		case USB_ENDPOINT_XFER_ISOC:
@@ -1547,6 +1556,10 @@
 		if (!usb_endpoint_xfer_int(&ep->desc))
 			return -EINVAL;
  interrupt_urb:
+		if (!is_in)
+			allow_zero = true;
+		else
+			allow_short = true;
 		break;
 
 	case USBDEVFS_URB_TYPE_ISO:
@@ -1691,16 +1704,21 @@
 	u = (is_in ? URB_DIR_IN : URB_DIR_OUT);
 	if (uurb->flags & USBDEVFS_URB_ISO_ASAP)
 		u |= URB_ISO_ASAP;
-	if (uurb->flags & USBDEVFS_URB_SHORT_NOT_OK && is_in)
+	if (allow_short && uurb->flags & USBDEVFS_URB_SHORT_NOT_OK)
 		u |= URB_SHORT_NOT_OK;
 	if (uurb->flags & USBDEVFS_URB_NO_FSBR)
 		u |= URB_NO_FSBR;
-	if (uurb->flags & USBDEVFS_URB_ZERO_PACKET)
+	if (allow_zero && uurb->flags & USBDEVFS_URB_ZERO_PACKET)
 		u |= URB_ZERO_PACKET;
 	if (uurb->flags & USBDEVFS_URB_NO_INTERRUPT)
 		u |= URB_NO_INTERRUPT;
 	as->urb->transfer_flags = u;
 
+	if (!allow_short && uurb->flags & USBDEVFS_URB_SHORT_NOT_OK)
+		dev_warn(&ps->dev->dev, "Requested nonsensical USBDEVFS_URB_SHORT_NOT_OK.\n");
+	if (!allow_zero && uurb->flags & USBDEVFS_URB_ZERO_PACKET)
+		dev_warn(&ps->dev->dev, "Requested nonsensical USBDEVFS_URB_ZERO_PACKET.\n");
+
 	as->urb->transfer_buffer_length = uurb->buffer_length;
 	as->urb->setup_packet = (unsigned char *)dr;
 	dr = NULL;
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 5532246..7dae981 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -509,7 +509,6 @@
 	struct device *dev;
 	struct usb_device *udev;
 	int retval = 0;
-	int lpm_disable_error = -ENODEV;
 
 	if (!iface)
 		return -ENODEV;
@@ -530,16 +529,6 @@
 
 	iface->condition = USB_INTERFACE_BOUND;
 
-	/* See the comment about disabling LPM in usb_probe_interface(). */
-	if (driver->disable_hub_initiated_lpm) {
-		lpm_disable_error = usb_unlocked_disable_lpm(udev);
-		if (lpm_disable_error) {
-			dev_err(&iface->dev, "%s Failed to disable LPM for driver %s\n.",
-					__func__, driver->name);
-			return -ENOMEM;
-		}
-	}
-
 	/* Claimed interfaces are initially inactive (suspended) and
 	 * runtime-PM-enabled, but only if the driver has autosuspend
 	 * support.  Otherwise they are marked active, to prevent the
@@ -558,9 +547,20 @@
 	if (device_is_registered(dev))
 		retval = device_bind_driver(dev);
 
-	/* Attempt to re-enable USB3 LPM, if the disable was successful. */
-	if (!lpm_disable_error)
-		usb_unlocked_enable_lpm(udev);
+	if (retval) {
+		dev->driver = NULL;
+		usb_set_intfdata(iface, NULL);
+		iface->needs_remote_wakeup = 0;
+		iface->condition = USB_INTERFACE_UNBOUND;
+
+		/*
+		 * Unbound interfaces are always runtime-PM-disabled
+		 * and runtime-PM-suspended
+		 */
+		if (driver->supports_autosuspend)
+			pm_runtime_disable(dev);
+		pm_runtime_set_suspended(dev);
+	}
 
 	return retval;
 }
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index bb2a4fe..82dfc60 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -91,6 +91,8 @@
 	struct usb_interface_cache *intf_cache = NULL;
 	int i;
 
+	if (!config)
+		return NULL;
 	for (i = 0; i < config->desc.bNumInterfaces; i++) {
 		if (config->intf_cache[i]->altsetting[0].desc.bInterfaceNumber
 				== iface_num) {
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index dcc183d..359c09a 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -3735,6 +3735,7 @@
 	if (cpu_to_affin)
 		unregister_cpu_notifier(&mdwc->dwc3_cpu_notifier);
 put_dwc3:
+	platform_device_put(mdwc->dwc3);
 	if (mdwc->bus_perf_client)
 		msm_bus_scale_unregister_client(mdwc->bus_perf_client);
 
@@ -3788,6 +3789,7 @@
 
 	if (mdwc->hs_phy)
 		mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
+	platform_device_put(mdwc->dwc3);
 	of_platform_depopulate(&pdev->dev);
 
 	pm_runtime_disable(mdwc->dev);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index e8e5c32..70b3a66 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -3632,6 +3632,13 @@
 	/* Endpoint IRQ, handle it and return early */
 	if (event->type.is_devspec == 0) {
 		/* depevt */
+		/* If a remote-wakeup attempt by the device failed, the core
+		 * will not raise a wakeup event after resume. Handle it here
+		 * on the endpoint event, which indicates the bus has resumed.
+		 */
+		if (dwc->b_suspend &&
+		    dwc3_get_link_state(dwc) == DWC3_LINK_STATE_U0)
+			dwc3_gadget_wakeup_interrupt(dwc, false);
 		return dwc3_endpoint_interrupt(dwc, &event->depevt);
 	}
 
diff --git a/drivers/usb/gadget/function/f_ipc.c b/drivers/usb/gadget/function/f_ipc.c
index b6ce9c9..a79a559 100644
--- a/drivers/usb/gadget/function/f_ipc.c
+++ b/drivers/usb/gadget/function/f_ipc.c
@@ -407,7 +407,6 @@
 			break;
 
 		ctxt->current_state = IPC_CONNECTED;
-		ctxt->online = 1;
 		ctxt->pdev = platform_device_alloc("ipc_bridge", -1);
 		if (!ctxt->pdev)
 			goto pdev_fail;
@@ -431,9 +430,9 @@
 		if (ctxt->connected)
 			break;
 
-		platform_device_unregister(ctxt->pdev);
 		ctxt->current_state = IPC_DISCONNECTED;
 		wake_up(&ctxt->state_wq);
+		platform_device_unregister(ctxt->pdev);
 		break;
 	default:
 		pr_debug("%s: Unknown current state\n", __func__);
@@ -442,7 +441,6 @@
 	return;
 
 pdev_fail:
-	ctxt->online = 0;
 	ctxt->current_state = IPC_DISCONNECTED;
 	return;
 }
@@ -591,6 +589,7 @@
 
 	spin_lock_irqsave(&ctxt->lock, flags);
 	ctxt->connected = 1;
+	ctxt->online = 1;
 	spin_unlock_irqrestore(&ctxt->lock, flags);
 	schedule_work(&ctxt->func_work);
 
@@ -604,6 +603,7 @@
 
 	pr_debug("%s: Disabling\n", __func__);
 	spin_lock_irqsave(&ctxt->lock, flags);
+	ctxt->online = 0;
 	ctxt->connected = 0;
 	spin_unlock_irqrestore(&ctxt->lock, flags);
 	schedule_work(&ctxt->func_work);
diff --git a/drivers/usb/gadget/function/u_data_ipa.c b/drivers/usb/gadget/function/u_data_ipa.c
index 7af152b3..6712ca2 100644
--- a/drivers/usb/gadget/function/u_data_ipa.c
+++ b/drivers/usb/gadget/function/u_data_ipa.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -451,8 +451,9 @@
 
 	/* update IPA Parameteres here. */
 	port->ipa_params.usb_connection_speed = gadget->speed;
-	port->ipa_params.reset_pipe_after_lpm =
-				msm_dwc3_reset_ep_after_lpm(gadget);
+	if (!gadget->is_chipidea)
+		port->ipa_params.reset_pipe_after_lpm =
+			msm_dwc3_reset_ep_after_lpm(gadget);
 	port->ipa_params.skip_ep_cfg = true;
 	port->ipa_params.keep_ipa_awake = true;
 	port->ipa_params.cons_clnt_hdl = -1;
@@ -469,19 +470,29 @@
 				__func__);
 			goto out;
 		}
-
-		sps_params = MSM_SPS_MODE | MSM_DISABLE_WB
+		if (!gadget->is_chipidea) {
+			sps_params = MSM_SPS_MODE | MSM_DISABLE_WB
 				| MSM_PRODUCER | port->src_pipe_idx;
-		port->rx_req->length = 32*1024;
-		port->rx_req->udc_priv = sps_params;
-		configure_fifo(port->usb_bam_type,
-				port->src_connection_idx,
-				port->port_usb->out);
-		ret = msm_ep_config(gport->out, port->rx_req);
-		if (ret) {
-			pr_err("msm_ep_config() failed for OUT EP\n");
-			spin_unlock_irqrestore(&port->port_lock, flags);
-			goto out;
+			port->rx_req->length = 32*1024;
+			port->rx_req->udc_priv = sps_params;
+			configure_fifo(port->usb_bam_type,
+					port->src_connection_idx,
+					port->port_usb->out);
+			ret = msm_ep_config(gport->out, port->rx_req);
+			if (ret) {
+				pr_err("msm_ep_config() failed for OUT EP\n");
+				spin_unlock_irqrestore(&port->port_lock, flags);
+				goto out;
+			}
+		} else {
+			/* gadget->is_chipidea */
+			get_bam2bam_connection_info(port->usb_bam_type,
+					port->src_connection_idx,
+					&port->src_pipe_idx,
+					NULL, NULL, NULL);
+			sps_params = (MSM_SPS_MODE | port->src_pipe_idx |
+				MSM_VENDOR_ID) & ~MSM_IS_FINITE_TRANSFER;
+			port->rx_req->udc_priv = sps_params;
 		}
 	}
 
@@ -496,17 +507,29 @@
 				__func__);
 			goto unconfig_msm_ep_out;
 		}
-		sps_params = MSM_SPS_MODE | MSM_DISABLE_WB |
-						port->dst_pipe_idx;
-		port->tx_req->length = 32*1024;
-		port->tx_req->udc_priv = sps_params;
-		configure_fifo(port->usb_bam_type,
-				port->dst_connection_idx, gport->in);
-		ret = msm_ep_config(gport->in, port->tx_req);
-		if (ret) {
-			pr_err("msm_ep_config() failed for IN EP\n");
-			spin_unlock_irqrestore(&port->port_lock, flags);
-			goto unconfig_msm_ep_out;
+		if (!gadget->is_chipidea) {
+			sps_params = MSM_SPS_MODE | MSM_DISABLE_WB |
+				port->dst_pipe_idx;
+			port->tx_req->length = 32*1024;
+			port->tx_req->udc_priv = sps_params;
+			configure_fifo(port->usb_bam_type,
+					port->dst_connection_idx, gport->in);
+
+			ret = msm_ep_config(gport->in, port->tx_req);
+			if (ret) {
+				pr_err("msm_ep_config() failed for IN EP\n");
+				spin_unlock_irqrestore(&port->port_lock, flags);
+				goto unconfig_msm_ep_out;
+			}
+		} else {
+			/* gadget->is_chipidea */
+			get_bam2bam_connection_info(port->usb_bam_type,
+					port->dst_connection_idx,
+					&port->dst_pipe_idx,
+					NULL, NULL, NULL);
+			sps_params = (MSM_SPS_MODE | port->dst_pipe_idx |
+				MSM_VENDOR_ID) & ~MSM_IS_FINITE_TRANSFER;
+			port->tx_req->udc_priv = sps_params;
 		}
 	}
 
@@ -1163,8 +1186,8 @@
 		spin_unlock_irqrestore(&port->port_lock, flags);
 		msm_dwc3_reset_dbm_ep(port->port_usb->in);
 		spin_lock_irqsave(&port->port_lock, flags);
-		usb_bam_resume(port->usb_bam_type, &port->ipa_params);
 	}
+	usb_bam_resume(port->usb_bam_type, &port->ipa_params);
 
 exit:
 	spin_unlock_irqrestore(&port->port_lock, flags);
diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c
index 6ba122c..95df2b3 100644
--- a/drivers/usb/gadget/udc/fotg210-udc.c
+++ b/drivers/usb/gadget/udc/fotg210-udc.c
@@ -1066,12 +1066,15 @@
 static int fotg210_udc_remove(struct platform_device *pdev)
 {
 	struct fotg210_udc *fotg210 = platform_get_drvdata(pdev);
+	int i;
 
 	usb_del_gadget_udc(&fotg210->gadget);
 	iounmap(fotg210->reg);
 	free_irq(platform_get_irq(pdev, 0), fotg210);
 
 	fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req);
+	for (i = 0; i < FOTG210_MAX_NUM_EP; i++)
+		kfree(fotg210->ep[i]);
 	kfree(fotg210);
 
 	return 0;
@@ -1102,7 +1105,7 @@
 	/* initialize udc */
 	fotg210 = kzalloc(sizeof(struct fotg210_udc), GFP_KERNEL);
 	if (fotg210 == NULL)
-		goto err_alloc;
+		goto err;
 
 	for (i = 0; i < FOTG210_MAX_NUM_EP; i++) {
 		_ep[i] = kzalloc(sizeof(struct fotg210_ep), GFP_KERNEL);
@@ -1114,7 +1117,7 @@
 	fotg210->reg = ioremap(res->start, resource_size(res));
 	if (fotg210->reg == NULL) {
 		pr_err("ioremap error.\n");
-		goto err_map;
+		goto err_alloc;
 	}
 
 	spin_lock_init(&fotg210->lock);
@@ -1162,7 +1165,7 @@
 	fotg210->ep0_req = fotg210_ep_alloc_request(&fotg210->ep[0]->ep,
 				GFP_KERNEL);
 	if (fotg210->ep0_req == NULL)
-		goto err_req;
+		goto err_map;
 
 	fotg210_init(fotg210);
 
@@ -1190,12 +1193,14 @@
 	fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req);
 
 err_map:
-	if (fotg210->reg)
-		iounmap(fotg210->reg);
+	iounmap(fotg210->reg);
 
 err_alloc:
+	for (i = 0; i < FOTG210_MAX_NUM_EP; i++)
+		kfree(fotg210->ep[i]);
 	kfree(fotg210);
 
+err:
 	return ret;
 }
 
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 643e087..da89f3f 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -1257,16 +1257,17 @@
 				break;
 			}
 
-			/* Software should not attempt to set
-			 * port link state above '3' (U3) and the port
-			 * must be enabled.
-			 */
-			if ((temp & PORT_PE) == 0 ||
-				(link_state > USB_SS_PORT_LS_U3)) {
-				xhci_warn(xhci, "Cannot set link state.\n");
+			/* Port must be enabled */
+			if (!(temp & PORT_PE)) {
+				retval = -ENODEV;
+				break;
+			}
+			/* Can't set port link state above '3' (U3) */
+			if (link_state > USB_SS_PORT_LS_U3) {
+				xhci_warn(xhci, "Cannot set port %d link state %d\n",
+					 wIndex, link_state);
 				goto error;
 			}
-
 			if (link_state == USB_SS_PORT_LS_U3) {
 				slot_id = xhci_find_slot_id_by_port(hcd, xhci,
 						wIndex + 1);
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index ce9e457..c108758 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -735,10 +735,10 @@
 	xhci_mtk_host_enable(mtk);
 
 	xhci_dbg(xhci, "%s: restart port polling\n", __func__);
-	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
-	usb_hcd_poll_rh_status(hcd);
 	set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
 	usb_hcd_poll_rh_status(xhci->shared_hcd);
+	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+	usb_hcd_poll_rh_status(hcd);
 	return 0;
 }
 
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index f6782a3..b514055 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -179,6 +179,8 @@
 	}
 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
 	    (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
+	     pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
+	     pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
 	     pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI ||
 	     pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI))
 		xhci->quirks |= XHCI_MISSING_CAS;
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index e36c6c6..1e67234 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -423,6 +423,9 @@
 	spin_unlock_irqrestore(&dev->lock, flags);
 	mutex_unlock(&dev->io_mutex);
 
+	if (WARN_ON_ONCE(len >= sizeof(in_buffer)))
+		return -EIO;
+
 	return simple_read_from_buffer(buffer, count, ppos, in_buffer, len);
 }
 
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index 813035f..7d25267 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -408,12 +408,20 @@
 			  transfer_buffer_length,
 			  KOBIL_TIMEOUT);
 
-	dev_dbg(&port->dev, "%s - Send get_status_line_state URB returns: %i. Statusline: %02x\n",
-		__func__, result, transfer_buffer[0]);
+	dev_dbg(&port->dev, "Send get_status_line_state URB returns: %i\n",
+			result);
+	if (result < 1) {
+		if (result >= 0)
+			result = -EIO;
+		goto out_free;
+	}
+
+	dev_dbg(&port->dev, "Statusline: %02x\n", transfer_buffer[0]);
 
 	result = 0;
 	if ((transfer_buffer[0] & SUSBCR_GSL_DSR) != 0)
 		result = TIOCM_DSR;
+out_free:
 	kfree(transfer_buffer);
 	return result;
 }
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index 2674da4..6d6acf2 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -87,7 +87,8 @@
 
 /* Motorola Tetra driver */
 #define MOTOROLA_TETRA_IDS()			\
-	{ USB_DEVICE(0x0cad, 0x9011) }	/* Motorola Solutions TETRA PEI */
+	{ USB_DEVICE(0x0cad, 0x9011) },	/* Motorola Solutions TETRA PEI */ \
+	{ USB_DEVICE(0x0cad, 0x9012) }	/* MTP6550 */
 DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
 
 /* Novatel Wireless GPS driver */
diff --git a/drivers/usb/wusbcore/security.c b/drivers/usb/wusbcore/security.c
index 8c9421b..6bf86ca 100644
--- a/drivers/usb/wusbcore/security.c
+++ b/drivers/usb/wusbcore/security.c
@@ -230,7 +230,7 @@
 
 	result = usb_get_descriptor(usb_dev, USB_DT_SECURITY,
 				    0, secd, sizeof(*secd));
-	if (result < sizeof(*secd)) {
+	if (result < (int)sizeof(*secd)) {
 		dev_err(dev, "Can't read security descriptor or "
 			"not enough data: %d\n", result);
 		goto out;
diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c
index 9a53912..5d3ba74 100644
--- a/drivers/uwb/hwa-rc.c
+++ b/drivers/uwb/hwa-rc.c
@@ -873,6 +873,7 @@
 error_rc_add:
 	usb_put_intf(iface);
 	usb_put_dev(hwarc->usb_dev);
+	kfree(hwarc);
 error_alloc:
 	uwb_rc_put(uwb_rc);
 error_rc_alloc:
diff --git a/drivers/video/fbdev/aty/atyfb.h b/drivers/video/fbdev/aty/atyfb.h
index 63c4842..46e0e8b 100644
--- a/drivers/video/fbdev/aty/atyfb.h
+++ b/drivers/video/fbdev/aty/atyfb.h
@@ -332,6 +332,8 @@
 extern void aty_set_pll_ct(const struct fb_info *info, const union aty_pll *pll);
 extern u8 aty_ld_pll_ct(int offset, const struct atyfb_par *par);
 
+extern const u8 aty_postdividers[8];
+
 
     /*
      *  Hardware cursor support
@@ -358,7 +360,6 @@
 
 extern void aty_reset_engine(const struct atyfb_par *par);
 extern void aty_init_engine(struct atyfb_par *par, struct fb_info *info);
-extern u8   aty_ld_pll_ct(int offset, const struct atyfb_par *par);
 
 void atyfb_copyarea(struct fb_info *info, const struct fb_copyarea *area);
 void atyfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index 81367cf..da748c3 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -3093,17 +3093,18 @@
 		/*
 		 * PLL Reference Divider M:
 		 */
-		M = pll_regs[2];
+		M = pll_regs[PLL_REF_DIV];
 
 		/*
 		 * PLL Feedback Divider N (Dependent on CLOCK_CNTL):
 		 */
-		N = pll_regs[7 + (clock_cntl & 3)];
+		N = pll_regs[VCLK0_FB_DIV + (clock_cntl & 3)];
 
 		/*
 		 * PLL Post Divider P (Dependent on CLOCK_CNTL):
 		 */
-		P = 1 << (pll_regs[6] >> ((clock_cntl & 3) << 1));
+		P = aty_postdividers[((pll_regs[VCLK_POST_DIV] >> ((clock_cntl & 3) << 1)) & 3) |
+		                     ((pll_regs[PLL_EXT_CNTL] >> (2 + (clock_cntl & 3))) & 4)];
 
 		/*
 		 * PLL Divider Q:
diff --git a/drivers/video/fbdev/aty/mach64_ct.c b/drivers/video/fbdev/aty/mach64_ct.c
index 51f29d6..af54256 100644
--- a/drivers/video/fbdev/aty/mach64_ct.c
+++ b/drivers/video/fbdev/aty/mach64_ct.c
@@ -114,7 +114,7 @@
  */
 
 #define Maximum_DSP_PRECISION 7
-static u8 postdividers[] = {1,2,4,8,3};
+const u8 aty_postdividers[8] = {1,2,4,8,3,5,6,12};
 
 static int aty_dsp_gt(const struct fb_info *info, u32 bpp, struct pll_ct *pll)
 {
@@ -221,7 +221,7 @@
 		pll->vclk_post_div += (q <  64*8);
 		pll->vclk_post_div += (q <  32*8);
 	}
-	pll->vclk_post_div_real = postdividers[pll->vclk_post_div];
+	pll->vclk_post_div_real = aty_postdividers[pll->vclk_post_div];
 	//    pll->vclk_post_div <<= 6;
 	pll->vclk_fb_div = q * pll->vclk_post_div_real / 8;
 	pllvclk = (1000000 * 2 * pll->vclk_fb_div) /
@@ -512,7 +512,7 @@
 		u8 mclk_fb_div, pll_ext_cntl;
 		pll->ct.pll_ref_div = aty_ld_pll_ct(PLL_REF_DIV, par);
 		pll_ext_cntl = aty_ld_pll_ct(PLL_EXT_CNTL, par);
-		pll->ct.xclk_post_div_real = postdividers[pll_ext_cntl & 0x07];
+		pll->ct.xclk_post_div_real = aty_postdividers[pll_ext_cntl & 0x07];
 		mclk_fb_div = aty_ld_pll_ct(MCLK_FB_DIV, par);
 		if (pll_ext_cntl & PLL_MFB_TIMES_4_2B)
 			mclk_fb_div <<= 1;
@@ -534,7 +534,7 @@
 		xpost_div += (q <  64*8);
 		xpost_div += (q <  32*8);
 	}
-	pll->ct.xclk_post_div_real = postdividers[xpost_div];
+	pll->ct.xclk_post_div_real = aty_postdividers[xpost_div];
 	pll->ct.mclk_fb_div = q * pll->ct.xclk_post_div_real / 8;
 
 #ifdef CONFIG_PPC
@@ -583,7 +583,7 @@
 			mpost_div += (q <  64*8);
 			mpost_div += (q <  32*8);
 		}
-		sclk_post_div_real = postdividers[mpost_div];
+		sclk_post_div_real = aty_postdividers[mpost_div];
 		pll->ct.sclk_fb_div = q * sclk_post_div_real / 8;
 		pll->ct.spll_cntl2 = mpost_div << 4;
 #ifdef DEBUG
diff --git a/drivers/video/fbdev/msm/mdp3_ctrl.c b/drivers/video/fbdev/msm/mdp3_ctrl.c
index defec0b..0108a13 100644
--- a/drivers/video/fbdev/msm/mdp3_ctrl.c
+++ b/drivers/video/fbdev/msm/mdp3_ctrl.c
@@ -151,16 +151,9 @@
 	return blocking_notifier_call_chain(&ses->notifier_head, event, ses);
 }
 
-static void mdp3_dispatch_dma_done(struct kthread_work *work)
+static void __mdp3_dispatch_dma_done(struct mdp3_session_data *session)
 {
-	struct mdp3_session_data *session;
-	int cnt = 0;
-
-	pr_debug("%s\n", __func__);
-	session = container_of(work, struct mdp3_session_data,
-				dma_done_work);
-	if (!session)
-		return;
+	int cnt;
 
 	cnt = atomic_read(&session->dma_done_cnt);
 	MDSS_XLOG(cnt);
@@ -171,6 +164,29 @@
 	}
 }
 
+void mdp3_flush_dma_done(struct mdp3_session_data *session)
+{
+	if (!session)
+		return;
+
+	pr_debug("%s\n", __func__);
+
+	__mdp3_dispatch_dma_done(session);
+}
+
+static void mdp3_dispatch_dma_done(struct kthread_work *work)
+{
+	struct mdp3_session_data *session;
+
+	pr_debug("%s\n", __func__);
+	session = container_of(work, struct mdp3_session_data,
+				dma_done_work);
+	if (!session)
+		return;
+
+	__mdp3_dispatch_dma_done(session);
+}
+
 static void mdp3_dispatch_clk_off(struct work_struct *work)
 {
 	struct mdp3_session_data *session;
@@ -869,6 +885,13 @@
 	te.hw_vsync_mode = panel_info->mipi.hw_vsync_mode;
 	te.tear_check_en = panel_info->te.tear_check_en;
 	te.sync_cfg_height = panel_info->te.sync_cfg_height;
+
+	/* For mdp3, max. value of CFG_HEIGHT is 0x7ff,
+	 * for mdp5, max. value of CFG_HEIGHT is 0xffff.
+	 */
+	if (te.sync_cfg_height > 0x7ff)
+		te.sync_cfg_height = 0x7ff;
+
 	te.vsync_init_val = panel_info->te.vsync_init_val;
 	te.sync_threshold_start = panel_info->te.sync_threshold_start;
 	te.sync_threshold_continue = panel_info->te.sync_threshold_continue;
@@ -3060,6 +3083,7 @@
 		pr_err("fail to init dma\n");
 		goto init_done;
 	}
+	mdp3_session->dma->session = mdp3_session;
 
 	intf_type = mdp3_ctrl_get_intf_type(mfd);
 	mdp3_session->intf = mdp3_get_display_intf(intf_type);
diff --git a/drivers/video/fbdev/msm/mdp3_ctrl.h b/drivers/video/fbdev/msm/mdp3_ctrl.h
index 64be356..de90127 100644
--- a/drivers/video/fbdev/msm/mdp3_ctrl.h
+++ b/drivers/video/fbdev/msm/mdp3_ctrl.h
@@ -91,5 +91,6 @@
 int mdp3_ctrl_get_pack_pattern(u32 imgType);
 int mdp3_ctrl_reset(struct msm_fb_data_type *mfd);
 int mdp3_get_ion_client(struct msm_fb_data_type *mfd);
+void mdp3_flush_dma_done(struct mdp3_session_data *mdp3_session);
 
 #endif /* MDP3_CTRL_H */
diff --git a/drivers/video/fbdev/msm/mdp3_dma.c b/drivers/video/fbdev/msm/mdp3_dma.c
index c9356bd..e569a0b 100644
--- a/drivers/video/fbdev/msm/mdp3_dma.c
+++ b/drivers/video/fbdev/msm/mdp3_dma.c
@@ -16,6 +16,7 @@
 #include "mdp3_dma.h"
 #include "mdp3_hwio.h"
 #include "mdss_debug.h"
+#include "mdp3_ctrl.h"
 
 #define DMA_STOP_POLL_SLEEP_US 1000
 #define DMA_STOP_POLL_TIMEOUT_US 200000
@@ -294,7 +295,7 @@
 
 	vsync_clk_speed_hz = MDP_VSYNC_CLK_RATE;
 
-	cfg = total_lines << VSYNC_TOTAL_LINES_SHIFT;
+	cfg = te->sync_cfg_height << VSYNC_TOTAL_LINES_SHIFT;
 	total_lines *= te->frame_rate;
 
 	vclks_line = (total_lines) ? vsync_clk_speed_hz / total_lines : 0;
@@ -1080,6 +1081,16 @@
 
 	reinit_completion(&dma->dma_comp);
 	dma->vsync_client.handler = NULL;
+
+	/*
+	 * Interrupts are disabled.
+	 * Check for blocked dma done interrupt.
+	 * Flush items waiting for dma done interrupt.
+	 */
+	if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD &&
+		atomic_read(&dma->session->dma_done_cnt))
+		mdp3_flush_dma_done(dma->session);
+
 	return ret;
 }
 
diff --git a/drivers/video/fbdev/msm/mdp3_dma.h b/drivers/video/fbdev/msm/mdp3_dma.h
index e0458f8..03d3cf0 100644
--- a/drivers/video/fbdev/msm/mdp3_dma.h
+++ b/drivers/video/fbdev/msm/mdp3_dma.h
@@ -293,6 +293,8 @@
 	struct fb_cmap *gc_cmap;
 	struct fb_cmap *hist_cmap;
 
+	struct mdp3_session_data *session;
+
 	bool (*busy)(void);
 
 	int (*dma_config)(struct mdp3_dma *dma,
diff --git a/drivers/video/fbdev/msm/mdss_fb.c b/drivers/video/fbdev/msm/mdss_fb.c
index 63f8553..6a7b7dd 100644
--- a/drivers/video/fbdev/msm/mdss_fb.c
+++ b/drivers/video/fbdev/msm/mdss_fb.c
@@ -1908,7 +1908,7 @@
 	ret = mfd->mdp.off_fnc(mfd);
 	if (ret)
 		mfd->panel_power_state = cur_power_state;
-	else if (mdss_panel_is_power_off(req_power_state))
+	else if (!mdss_panel_is_power_on_interactive(req_power_state))
 		mdss_fb_release_fences(mfd);
 	mfd->op_enable = true;
 	complete(&mfd->power_off_comp);
@@ -3704,6 +3704,19 @@
 	int ret = -ENOTSUPP;
 	u32 new_dsi_mode, dynamic_dsi_switch = 0;
 
+	if (mfd->panel_info->panel_dead) {
+		pr_debug("Panel dead, signal fences and exit commit\n");
+		/*
+		 * In case of ESD attack, return early from commit
+		 * after signalling fences.
+		 */
+		mdss_fb_release_kickoff(mfd);
+		mdss_fb_signal_timeline(sync_pt_data);
+		if ((mfd->panel.type == MIPI_CMD_PANEL) &&
+			(mfd->mdp.signal_retire_fence))
+			mfd->mdp.signal_retire_fence(mfd, 1);
+		return ret;
+	}
 	if (!sync_pt_data->async_wait_fences)
 		mdss_fb_wait_for_fence(sync_pt_data);
 	sync_pt_data->flushed = false;
@@ -4583,7 +4596,6 @@
 	struct mdp_frc_info *frc_info = NULL;
 	struct mdp_frc_info __user *frc_info_user;
 	struct msm_fb_data_type *mfd;
-	struct mdss_overlay_private *mdp5_data = NULL;
 
 	ret = copy_from_user(&commit, argp, sizeof(struct mdp_layer_commit));
 	if (ret) {
@@ -4595,26 +4607,6 @@
 	if (!mfd)
 		return -EINVAL;
 
-	mdp5_data = mfd_to_mdp5_data(mfd);
-
-	if (mfd->panel_info->panel_dead) {
-		pr_debug("early commit return\n");
-		MDSS_XLOG(mfd->panel_info->panel_dead);
-		/*
-		 * In case of an ESD attack, since we early return from the
-		 * commits, we need to signal the outstanding fences.
-		 */
-		mutex_lock(&mfd->mdp_sync_pt_data.sync_mutex);
-		atomic_inc(&mfd->mdp_sync_pt_data.commit_cnt);
-		mutex_unlock(&mfd->mdp_sync_pt_data.sync_mutex);
-		mdss_fb_release_fences(mfd);
-		if ((mfd->panel.type == MIPI_CMD_PANEL) &&
-			mfd->mdp.signal_retire_fence && mdp5_data)
-			mfd->mdp.signal_retire_fence(mfd,
-						mdp5_data->retire_cnt);
-		return 0;
-	}
-
 	output_layer_user = commit.commit_v1.output_layer;
 	if (output_layer_user) {
 		buffer_size = sizeof(struct mdp_output_layer);
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
index ef69273..a3edb20 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
@@ -496,6 +496,9 @@
 	if (!access_ok(VERIFY_WRITE, mr->buffer, mr->buffer_size))
 		return -EFAULT;
 
+	if (mr->w > 4096 || mr->h > 4096)
+		return -EINVAL;
+
 	if (mr->w * mr->h * 3 > mr->buffer_size)
 		return -EINVAL;
 
@@ -509,7 +512,7 @@
 			mr->x, mr->y, mr->w, mr->h);
 
 	if (r > 0) {
-		if (copy_to_user(mr->buffer, buf, mr->buffer_size))
+		if (copy_to_user(mr->buffer, buf, r))
 			r = -EFAULT;
 	}
 
diff --git a/drivers/vservices/Kconfig b/drivers/vservices/Kconfig
new file mode 100644
index 0000000..16b3bda
--- /dev/null
+++ b/drivers/vservices/Kconfig
@@ -0,0 +1,81 @@
+#
+# OKL4 Virtual Services framework
+#
+
+menuconfig VSERVICES_SUPPORT
+	tristate "OKL4 Virtual Services support"
+	default OKL4_GUEST || OKL4_VIRTUALISATION
+	select HOTPLUG
+	help
+	  This option adds core support for OKL4 Virtual Services. The Virtual
+	  Services framework is an inter-OS device/service sharing
+	  protocol which is supported on OKL4 Microvisor virtualization
+	  platforms. You will also need drivers from the following menu in
+	  order to make use of it.
+
+if VSERVICES_SUPPORT
+
+config VSERVICES_CHAR_DEV
+	bool "Virtual Services user-space service API"
+	default y
+	help
+	  Select this if you want to use user-space service drivers. You will
+	  also need udev rules that create device nodes, and protocol code
+	  generated by the OK Mill tool.
+
+config VSERVICES_DEBUG
+	bool "Virtual Services debugging support"
+	help
+	  Select this if you want to enable Virtual Services core framework
+	  debugging. The debug messages for various components of the Virtual
+	  Services core framework can be toggled at runtime on a per-session
+	  basis via sysfs. When Virtual Services debugging is enabled here
+	  but disabled at runtime, it has a minimal performance impact.
+
+config VSERVICES_LOCK_DEBUG
+	bool "Debug Virtual Services state locks"
+	default DEBUG_KERNEL
+	help
+	  This option enables some runtime checks that Virtual Services
+	  state lock functions are used correctly in service drivers.
+
+config VSERVICES_SERVER
+	tristate "Virtual Services server support"
+	depends on SYSFS
+	default y
+	help
+	  This option adds support for Virtual Services servers, which allows
+	  exporting of services from this Linux to other environments. Servers
+	  are created at runtime by writing to files in
+	  /sys/bus/vservices-server.
+
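+# Illustrative note: with VSERVICES_SERVER enabled, an individual service can
+# typically be created at runtime by writing its name to the core service's
+# create_service attribute under /sys/bus/vservices-server/devices/; the
+# exact device path depends on the platform and session.
+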
+config VSERVICES_CLIENT
+	tristate "Virtual Services client support"
+	default y
+	help
+	  This option adds support for Virtual Services clients, which allows
+	  connecting to services exported from other environments.
+
+config VSERVICES_SKELETON_DRIVER
+	tristate "Virtual Services skeleton driver"
+	depends on VSERVICES_SERVER || VSERVICES_CLIENT
+	default n
+	help
+	  This option adds support for a skeleton virtual service driver. This
+	  driver can be used for templating or testing of virtual service
+	  drivers. If unsure, say N.
+
+config VSERVICES_NAMED_DEVICE
+	bool "Virtual Services use named device node in /dev"
+	default n
+	help
+	  Select this if you want to use a named device node instead of a
+	  numeric device node in /dev.
+
+source "drivers/vservices/transport/Kconfig"
+
+source "drivers/vservices/protocol/Kconfig"
+
+source "drivers/vservices/Kconfig.stacks"
+
+endif # VSERVICES_SUPPORT
diff --git a/drivers/vservices/Kconfig.stacks b/drivers/vservices/Kconfig.stacks
new file mode 100644
index 0000000..97eba53
--- /dev/null
+++ b/drivers/vservices/Kconfig.stacks
@@ -0,0 +1,7 @@
+#
+# vServices drivers configuration
+#
+
+menu "Client and Server drivers"
+
+endmenu
diff --git a/drivers/vservices/Makefile b/drivers/vservices/Makefile
new file mode 100644
index 0000000..685ba0a
--- /dev/null
+++ b/drivers/vservices/Makefile
@@ -0,0 +1,16 @@
+ccflags-y += -Werror
+ccflags-$(CONFIG_VSERVICES_DEBUG) += -DDEBUG
+
+obj-$(CONFIG_VSERVICES_SUPPORT)	+= vservices.o
+vservices-objs-$(CONFIG_VSERVICES_CHAR_DEV) += devio.o
+vservices-objs = session.o $(vservices-objs-y)
+
+obj-$(CONFIG_VSERVICES_CLIENT) += core_client.o
+obj-$(CONFIG_VSERVICES_SERVER) += core_server.o
+
+obj-$(CONFIG_VSERVICES_SKELETON_DRIVER) += vservices_skeleton_driver.o
+vservices_skeleton_driver-objs = skeleton_driver.o
+
+obj-$(CONFIG_VSERVICES_SUPPORT) += transport/
+
+obj-$(CONFIG_VSERVICES_SUPPORT) += protocol/
diff --git a/drivers/vservices/compat.h b/drivers/vservices/compat.h
new file mode 100644
index 0000000..5f6926d
--- /dev/null
+++ b/drivers/vservices/compat.h
@@ -0,0 +1,59 @@
+/*
+ * drivers/vservices/compat.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Wrapper functions/definitions for compatibility between different kernel
+ * versions.
+ */
+
+#ifndef _VSERVICES_COMPAT_H
+#define _VSERVICES_COMPAT_H
+
+#include <linux/workqueue.h>
+#include <linux/version.h>
+
+/* The INIT_WORK_ONSTACK macro has a slightly different name in older kernels */
+#ifndef INIT_WORK_ONSTACK
+#define INIT_WORK_ONSTACK(_work, _func) INIT_WORK_ON_STACK(_work, _func)
+#endif
+
+/*
+ * We require a workqueue with no concurrency. This is provided by
+ * create_singlethread_workqueue() in kernels prior to 2.6.36.
+ * In later versions, create_singlethread_workqueue() enables WQ_MEM_RECLAIM and
+ * thus WQ_RESCUER, which allows work items to be grabbed by a rescuer thread
+ * and run concurrently if the queue is running too slowly. We must use
+ * alloc_ordered_workqueue() instead, to disable the rescuer.
+ */
+static inline struct workqueue_struct *
+vs_create_workqueue(const char *name)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
+	return create_singlethread_workqueue(name);
+#else
+	return alloc_ordered_workqueue(name, 0);
+#endif
+}
+
+/*
+ * The max3 macro has only been present from 2.6.37
+ * (commit: f27c85c56b32c42bcc54a43189c1e00fdceb23ec)
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 37)
+#define max3(x, y, z) ({			\
+	typeof(x) _max1 = (x);			\
+	typeof(y) _max2 = (y);			\
+	typeof(z) _max3 = (z);			\
+	(void) (&_max1 == &_max2);		\
+	(void) (&_max1 == &_max3);		\
+	_max1 > _max2 ? (_max1 > _max3 ? _max1 : _max3) : \
+		(_max2 > _max3 ? _max2 : _max3); })
+#endif
+
+#endif /* _VSERVICES_COMPAT_H */
diff --git a/drivers/vservices/core_client.c b/drivers/vservices/core_client.c
new file mode 100644
index 0000000..4cc78ac
--- /dev/null
+++ b/drivers/vservices/core_client.c
@@ -0,0 +1,733 @@
+/*
+ * drivers/vservices/core_client.c
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Client side core service application driver. This is responsible for:
+ *
+ *  - automatically connecting to the server when it becomes ready;
+ *  - sending a reset command to the server if something has gone wrong; and
+ *  - enumerating all the available services.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/err.h>
+#include <linux/module.h>
+
+#include <vservices/types.h>
+#include <vservices/transport.h>
+#include <vservices/session.h>
+#include <vservices/buffer.h>
+#include <vservices/service.h>
+
+#include <vservices/protocol/core/types.h>
+#include <vservices/protocol/core/common.h>
+#include <vservices/protocol/core/client.h>
+
+#include "session.h"
+#include "transport.h"
+#include "compat.h"
+
+struct core_client {
+	struct vs_client_core_state	state;
+	struct vs_service_device	*service;
+
+	struct list_head		message_queue;
+	struct mutex			message_queue_lock;
+	struct work_struct		message_queue_work;
+};
+
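+/*
+ * A service reset request queued for transmission to the server from the
+ * core client's workqueue (sent once transmit quota is available).
+ */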
+struct pending_reset {
+	struct vs_service_device	*service;
+	struct list_head		list;
+};
+
+#define to_core_client(x)	container_of(x, struct core_client, state)
+#define dev_to_core_client(x)	to_core_client(dev_get_drvdata(x))
+
+static int vs_client_core_fatal_error(struct vs_client_core_state *state)
+{
+	struct core_client *client = to_core_client(state);
+
+	/* Force a transport level reset */
+	dev_err(&client->service->dev, "Fatal error - resetting session\n");
+	return -EPROTO;
+}
+
+static struct core_client *
+vs_client_session_core_client(struct vs_session_device *session)
+{
+	struct vs_service_device *core_service = session->core_service;
+
+	if (!core_service)
+		return NULL;
+
+	return dev_to_core_client(&core_service->dev);
+}
+
+static ssize_t client_core_reset_service_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct vs_service_device *core_service = to_vs_service_device(dev);
+	struct vs_session_device *session =
+		vs_service_get_session(core_service);
+	struct vs_service_device *target;
+	vs_service_id_t service_id;
+	unsigned long val;
+	int err;
+
+	/* Writing a valid service id to this file resets that service */
+	err = kstrtoul(buf, 0, &val);
+	if (err)
+		return err;
+
+	service_id = val;
+	target = vs_session_get_service(session, service_id);
+	if (!target)
+		return -ENODEV;
+
+	err = vs_service_reset(target, core_service);
+
+	vs_put_service(target);
+	return err < 0 ? err : count;
+}
+
+static DEVICE_ATTR(reset_service, S_IWUSR, NULL,
+		client_core_reset_service_store);
+
+static struct attribute *client_core_dev_attrs[] = {
+	&dev_attr_reset_service.attr,
+	NULL,
+};
+
+static const struct attribute_group client_core_attr_group = {
+	.attrs = client_core_dev_attrs,
+};
+
+/*
+ * Protocol callbacks
+ */
+static int
+vs_client_core_handle_service_removed(struct vs_client_core_state *state,
+		u32 service_id)
+{
+	struct core_client *client = to_core_client(state);
+	struct vs_session_device *session =
+			vs_service_get_session(client->service);
+	struct vs_service_device *service;
+	int ret;
+
+	service = vs_session_get_service(session, service_id);
+	if (!service)
+		return -EINVAL;
+
+	ret = vs_service_handle_delete(service);
+	vs_put_service(service);
+	return ret;
+}
+
+static int vs_client_core_create_service(struct core_client *client,
+		struct vs_session_device *session, vs_service_id_t service_id,
+		struct vs_string *protocol_name_string,
+		struct vs_string *service_name_string)
+{
+	char *protocol_name, *service_name;
+	struct vs_service_device *service;
+	int ret = 0;
+
+	protocol_name = vs_string_dup(protocol_name_string, GFP_KERNEL);
+	if (!protocol_name) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	service_name = vs_string_dup(service_name_string, GFP_KERNEL);
+	if (!service_name) {
+		ret = -ENOMEM;
+		goto out_free_protocol_name;
+	}
+
+	service = vs_service_register(session, client->service, service_id,
+			protocol_name, service_name, NULL);
+	if (IS_ERR(service)) {
+		ret = PTR_ERR(service);
+		goto out_free_service_name;
+	}
+
+	vs_service_start(service);
+
+out_free_service_name:
+	kfree(service_name);
+out_free_protocol_name:
+	kfree(protocol_name);
+out:
+	return ret;
+}
+
+static int
+vs_client_core_handle_service_created(struct vs_client_core_state *state,
+		u32 service_id, struct vs_string service_name,
+		struct vs_string protocol_name, struct vs_mbuf *mbuf)
+{
+	struct core_client *client = to_core_client(state);
+	struct vs_session_device *session =
+			vs_service_get_session(client->service);
+	int err;
+
+	vs_dev_debug(VS_DEBUG_CLIENT_CORE,
+			vs_service_get_session(client->service),
+			&client->service->dev, "Service info for %d received\n",
+			service_id);
+
+	err = vs_client_core_create_service(client, session, service_id,
+			&protocol_name, &service_name);
+	if (err)
+		dev_err(&session->dev,
+				"Failed to create service with id %d: %d\n",
+				service_id, err);
+
+	vs_client_core_core_free_service_created(state, &service_name,
+			&protocol_name, mbuf);
+
+	return err;
+}
+
+static int
+vs_client_core_send_service_reset(struct core_client *client,
+		struct vs_service_device *service)
+{
+	return vs_client_core_core_send_service_reset(&client->state,
+			service->id, GFP_KERNEL);
+}
+
+static int
+vs_client_core_queue_service_reset(struct vs_session_device *session,
+		struct vs_service_device *service)
+{
+	struct core_client *client =
+		vs_client_session_core_client(session);
+	struct pending_reset *msg;
+
+	if (!client)
+		return -ENODEV;
+
+	vs_dev_debug(VS_DEBUG_SERVER, session, &session->dev,
+			"Sending reset for service %d\n", service->id);
+
+	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
+	mutex_lock(&client->message_queue_lock);
+
+	/* put by message_queue_work */
+	msg->service = vs_get_service(service);
+	list_add_tail(&msg->list, &client->message_queue);
+
+	mutex_unlock(&client->message_queue_lock);
+	queue_work(client->service->work_queue, &client->message_queue_work);
+
+	return 0;
+}
+
+static int vs_core_client_tx_ready(struct vs_client_core_state *state)
+{
+	struct core_client *client = to_core_client(state);
+
+	queue_work(client->service->work_queue, &client->message_queue_work);
+
+	return 0;
+}
+
+static void message_queue_work(struct work_struct *work)
+{
+	struct core_client *client = container_of(work, struct core_client,
+			message_queue_work);
+	struct vs_session_device *session =
+		vs_service_get_session(client->service);
+	struct pending_reset *msg;
+	int err;
+
+	vs_service_state_lock(client->service);
+	if (!VSERVICE_CORE_STATE_IS_CONNECTED(client->state.state.core)) {
+		vs_service_state_unlock(client->service);
+		return;
+	}
+
+	vs_dev_debug(VS_DEBUG_CLIENT, session, &session->dev, "tx_ready\n");
+
+	mutex_lock(&client->message_queue_lock);
+	while (!list_empty(&client->message_queue)) {
+		msg = list_first_entry(&client->message_queue,
+				struct pending_reset, list);
+
+		err = vs_client_core_send_service_reset(client, msg->service);
+
+		/* If we're out of quota there's no point continuing */
+		if (err == -ENOBUFS)
+			break;
+
+		/* Any other error is fatal */
+		if (err < 0) {
+			dev_err(&client->service->dev,
+					"Failed to send pending reset for %d (%d) - resetting session",
+					msg->service->id, err);
+			vs_service_reset_nosync(client->service);
+			break;
+		}
+
+		/*
+		 * The message sent successfully - remove it from the queue.
+		 * The corresponding vs_get_service() was done when the pending
+		 * message was enqueued.
+		 */
+		vs_put_service(msg->service);
+		list_del(&msg->list);
+		kfree(msg);
+	}
+	mutex_unlock(&client->message_queue_lock);
+	vs_service_state_unlock(client->service);
+}
+
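+/*
+ * Handler for the server_ready message: record the quotas and notification
+ * bit ranges the server allocated for the newly created service, then enable
+ * the service.
+ */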
+static int
+vs_client_core_handle_server_ready(struct vs_client_core_state *state,
+		u32 service_id, u32 in_quota, u32 out_quota, u32 in_bit_offset,
+		u32 in_num_bits, u32 out_bit_offset, u32 out_num_bits)
+{
+	struct core_client *client = to_core_client(state);
+	struct vs_session_device *session;
+	struct vs_service_device *service;
+	int ret;
+
+	if (service_id == 0)
+		return -EPROTO;
+
+	if (!in_quota || !out_quota)
+		return -EINVAL;
+
+	session = vs_service_get_session(client->service);
+	service = vs_session_get_service(session, service_id);
+	if (!service)
+		return -EINVAL;
+
+	service->send_quota = in_quota;
+	service->recv_quota = out_quota;
+	service->notify_send_offset = in_bit_offset;
+	service->notify_send_bits = in_num_bits;
+	service->notify_recv_offset = out_bit_offset;
+	service->notify_recv_bits = out_num_bits;
+
+	ret = vs_service_enable(service);
+	vs_put_service(service);
+	return ret;
+}
+
+static int
+vs_client_core_handle_service_reset(struct vs_client_core_state *state,
+		u32 service_id)
+{
+	struct core_client *client = to_core_client(state);
+	struct vs_session_device *session;
+
+	if (service_id == 0)
+		return -EPROTO;
+
+	session = vs_service_get_session(client->service);
+
+	return vs_service_handle_reset(session, service_id, true);
+}
+
+static void vs_core_client_start(struct vs_client_core_state *state)
+{
+	struct core_client *client = to_core_client(state);
+	struct vs_session_device *session =
+			vs_service_get_session(client->service);
+
+	/* FIXME - start callback should return int */
+	vs_dev_debug(VS_DEBUG_CLIENT_CORE, session, &client->service->dev,
+			"Core client start\n");
+}
+
+static void vs_core_client_reset(struct vs_client_core_state *state)
+{
+	struct core_client *client = to_core_client(state);
+	struct vs_session_device *session =
+		vs_service_get_session(client->service);
+	struct pending_reset *msg;
+
+	/* Flush the pending resets - we're about to delete everything */
+	while (!list_empty(&client->message_queue)) {
+		msg = list_first_entry(&client->message_queue,
+				struct pending_reset, list);
+		vs_put_service(msg->service);
+		list_del(&msg->list);
+		kfree(msg);
+	}
+
+	vs_session_delete_noncore(session);
+
+	/* Return to the initial quotas, until the next startup message */
+	client->service->send_quota = 0;
+	client->service->recv_quota = 1;
+}
+
+static int vs_core_client_startup(struct vs_client_core_state *state,
+		u32 core_in_quota, u32 core_out_quota)
+{
+	struct core_client *client = to_core_client(state);
+	struct vs_service_device *service = state->service;
+	struct vs_session_device *session = vs_service_get_session(service);
+	int ret;
+
+	if (!core_in_quota || !core_out_quota)
+		return -EINVAL;
+
+	/*
+	 * Update the service struct with our real quotas and tell the
+	 * transport about the change
+	 */
+
+	service->send_quota = core_in_quota;
+	service->recv_quota = core_out_quota;
+	ret = session->transport->vt->service_start(session->transport, service);
+	if (ret < 0)
+		return ret;
+
+	WARN_ON(!list_empty(&client->message_queue));
+
+	return vs_client_core_core_req_connect(state, GFP_KERNEL);
+}
+
+static struct vs_client_core_state *
+vs_core_client_alloc(struct vs_service_device *service)
+{
+	struct core_client *client;
+	int err;
+
+	client = kzalloc(sizeof(*client), GFP_KERNEL);
+	if (!client)
+		goto fail;
+
+	client->service = service;
+	INIT_LIST_HEAD(&client->message_queue);
+	INIT_WORK(&client->message_queue_work, message_queue_work);
+	mutex_init(&client->message_queue_lock);
+
+	err = sysfs_create_group(&service->dev.kobj, &client_core_attr_group);
+	if (err)
+		goto fail_free_client;
+
+	/*
+	 * Default transport resources for the core service client. The
+	 * server will inform us of the real quotas in the startup message.
+	 * Note that it is important that the quotas never decrease, so these
+	 * numbers are as small as possible.
+	 */
+	service->send_quota = 0;
+	service->recv_quota = 1;
+	service->notify_send_bits = 0;
+	service->notify_send_offset = 0;
+	service->notify_recv_bits = 0;
+	service->notify_recv_offset = 0;
+
+	return &client->state;
+
+fail_free_client:
+	kfree(client);
+fail:
+	return NULL;
+}
+
+static void vs_core_client_release(struct vs_client_core_state *state)
+{
+	struct core_client *client = to_core_client(state);
+
+	sysfs_remove_group(&client->service->dev.kobj, &client_core_attr_group);
+	kfree(client);
+}
+
+static struct vs_client_core vs_core_client_driver = {
+	.alloc		= vs_core_client_alloc,
+	.release	= vs_core_client_release,
+	.start		= vs_core_client_start,
+	.reset		= vs_core_client_reset,
+	.tx_ready	= vs_core_client_tx_ready,
+
+	.core = {
+		.nack_connect		= vs_client_core_fatal_error,
+
+		/* FIXME: Jira ticket SDK-3074 - ryanm. */
+		.ack_disconnect		= vs_client_core_fatal_error,
+		.nack_disconnect	= vs_client_core_fatal_error,
+
+		.msg_service_created	= vs_client_core_handle_service_created,
+		.msg_service_removed	= vs_client_core_handle_service_removed,
+
+		.msg_startup		= vs_core_client_startup,
+		/* FIXME: Jira ticket SDK-3074 - philipd. */
+		.msg_shutdown		= vs_client_core_fatal_error,
+		.msg_server_ready	= vs_client_core_handle_server_ready,
+		.msg_service_reset	= vs_client_core_handle_service_reset,
+	},
+};
+
+/*
+ * Client bus driver
+ */
+static int vs_client_bus_match(struct device *dev, struct device_driver *driver)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+	struct vs_service_driver *vsdrv = to_vs_service_driver(driver);
+
+	/* Don't match anything to the devio driver; it's bound manually */
+	if (!vsdrv->protocol)
+		return 0;
+
+	WARN_ON_ONCE(service->is_server || vsdrv->is_server);
+
+	/* Match if the protocol strings are the same */
+	if (strcmp(service->protocol, vsdrv->protocol) == 0)
+		return 1;
+
+	return 0;
+}
+
+static ssize_t is_server_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", service->is_server);
+}
+
+static ssize_t id_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", service->id);
+}
+
+static ssize_t dev_protocol_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n", service->protocol ?: "");
+}
+
+static ssize_t service_name_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n", service->name);
+}
+
+static ssize_t quota_in_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", service->send_quota);
+}
+
+static ssize_t quota_out_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", service->recv_quota);
+}
+
+static struct device_attribute vs_client_dev_attrs[] = {
+	__ATTR_RO(id),
+	__ATTR_RO(is_server),
+	__ATTR(protocol, S_IRUGO, dev_protocol_show, NULL),
+	__ATTR_RO(service_name),
+	__ATTR_RO(quota_in),
+	__ATTR_RO(quota_out),
+	__ATTR_NULL
+};
+
+static ssize_t protocol_show(struct device_driver *drv, char *buf)
+{
+	struct vs_service_driver *driver = to_vs_service_driver(drv);
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n", driver->protocol);
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)
+static struct driver_attribute vs_client_drv_attrs[] = {
+	__ATTR_RO(protocol),
+	__ATTR_NULL
+};
+#else
+static DRIVER_ATTR_RO(protocol);
+
+static struct attribute *vs_client_drv_attrs[] = {
+	&driver_attr_protocol.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(vs_client_drv);
+#endif
+
+struct bus_type vs_client_bus_type = {
+	.name		= "vservices-client",
+	.dev_attrs	= vs_client_dev_attrs,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)
+	.drv_attrs	= vs_client_drv_attrs,
+#else
+	.drv_groups	= vs_client_drv_groups,
+#endif
+	.match		= vs_client_bus_match,
+	.probe		= vs_service_bus_probe,
+	.remove		= vs_service_bus_remove,
+	.uevent		= vs_service_bus_uevent,
+};
+EXPORT_SYMBOL(vs_client_bus_type);
+
+/*
+ * Client session driver
+ */
+static int vs_client_session_probe(struct device *dev)
+{
+	struct vs_session_device *session = to_vs_session_device(dev);
+	struct vs_service_device *service;
+	char *protocol, *name;
+	int ret = 0;
+
+	if (session->is_server) {
+		ret = -ENODEV;
+		goto fail;
+	}
+
+	/* create a service for the core protocol client */
+	protocol = kstrdup(VSERVICE_CORE_PROTOCOL_NAME, GFP_KERNEL);
+	if (!protocol) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	name = kstrdup("core", GFP_KERNEL);
+	if (!name) {
+		ret = -ENOMEM;
+		goto fail_free_protocol;
+	}
+
+	service = vs_service_register(session, NULL, 0, protocol, name, NULL);
+	if (IS_ERR(service)) {
+		ret = PTR_ERR(service);
+		goto fail_free_name;
+	}
+
+fail_free_name:
+	kfree(name);
+fail_free_protocol:
+	/*
+	 * For mdp3, the maximum value of CFG_HEIGHT is 0x7ff;
+	 * for mdp5, it is 0xffff.
+	 */
+}
+
+static int
+vs_client_session_send_service_reset(struct vs_session_device *session,
+		struct vs_service_device *service)
+{
+	if (WARN_ON(service->id == 0))
+		return -EINVAL;
+
+	return vs_client_core_queue_service_reset(session, service);
+}
+
+static struct vs_session_driver vs_client_session_driver = {
+	.driver	= {
+		.name			= "vservices-client-session",
+		.owner			= THIS_MODULE,
+		.bus			= &vs_session_bus_type,
+		.probe			= vs_client_session_probe,
+		.suppress_bind_attrs	= true,
+	},
+	.is_server		= false,
+	.service_bus		= &vs_client_bus_type,
+	.service_local_reset	= vs_client_session_send_service_reset,
+};
+
+static int __init vs_core_client_init(void)
+{
+	int ret;
+
+	ret = bus_register(&vs_client_bus_type);
+	if (ret)
+		goto fail_bus_register;
+
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+	vs_devio_client_driver.driver.bus = &vs_client_bus_type;
+	vs_devio_client_driver.driver.owner = THIS_MODULE;
+	ret = driver_register(&vs_devio_client_driver.driver);
+	if (ret)
+		goto fail_devio_register;
+#endif
+
+	ret = driver_register(&vs_client_session_driver.driver);
+	if (ret)
+		goto fail_driver_register;
+
+	ret = vservice_core_client_register(&vs_core_client_driver,
+			"vs_core_client");
+	if (ret)
+		goto fail_core_register;
+
+	vservices_client_root = kobject_create_and_add("client-sessions",
+			vservices_root);
+	if (!vservices_client_root) {
+		ret = -ENOMEM;
+		goto fail_create_root;
+	}
+
+	return 0;
+
+fail_create_root:
+	vservice_core_client_unregister(&vs_core_client_driver);
+fail_core_register:
+	driver_unregister(&vs_client_session_driver.driver);
+fail_driver_register:
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+	driver_unregister(&vs_devio_client_driver.driver);
+	vs_devio_client_driver.driver.bus = NULL;
+	vs_devio_client_driver.driver.owner = NULL;
+fail_devio_register:
+#endif
+	bus_unregister(&vs_client_bus_type);
+fail_bus_register:
+	return ret;
+}
+
+static void __exit vs_core_client_exit(void)
+{
+	kobject_put(vservices_client_root);
+	vservice_core_client_unregister(&vs_core_client_driver);
+	driver_unregister(&vs_client_session_driver.driver);
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+	driver_unregister(&vs_devio_client_driver.driver);
+	vs_devio_client_driver.driver.bus = NULL;
+	vs_devio_client_driver.driver.owner = NULL;
+#endif
+	bus_unregister(&vs_client_bus_type);
+}
+
+subsys_initcall(vs_core_client_init);
+module_exit(vs_core_client_exit);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Core Client Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/core_server.c b/drivers/vservices/core_server.c
new file mode 100644
index 0000000..76ca83c
--- /dev/null
+++ b/drivers/vservices/core_server.c
@@ -0,0 +1,1651 @@
+/*
+ * drivers/vservices/core_server.c
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Server side core service application driver
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/ctype.h>
+
+#include <vservices/types.h>
+#include <vservices/transport.h>
+#include <vservices/session.h>
+#include <vservices/buffer.h>
+#include <vservices/service.h>
+
+#include <vservices/protocol/core/types.h>
+#include <vservices/protocol/core/common.h>
+#include <vservices/protocol/core/server.h>
+
+#include "transport.h"
+#include "session.h"
+#include "compat.h"
+
+#define VSERVICE_CORE_SERVICE_NAME	"core"
+
+struct core_server {
+	struct vs_server_core_state	state;
+	struct vs_service_device	*service;
+
+	/*
+	 * A list of messages to send, a mutex protecting it, and a
+	 * work item to process the list.
+	 */
+	struct list_head		message_queue;
+	struct mutex			message_queue_lock;
+	struct work_struct		message_queue_work;
+
+	struct mutex			alloc_lock;
+
+	/* The following are all protected by alloc_lock. */
+	unsigned long			*in_notify_map;
+	int				in_notify_map_bits;
+
+	unsigned long			*out_notify_map;
+	int				out_notify_map_bits;
+
+	unsigned			in_quota_remaining;
+	unsigned			out_quota_remaining;
+};
+
+/*
+ * Used for message deferral when the core service is over quota.
+ */
+struct pending_message {
+	vservice_core_message_id_t		type;
+	struct vs_service_device		*service;
+	struct list_head			list;
+};
+
+#define to_core_server(x)	container_of(x, struct core_server, state)
+#define dev_to_core_server(x)	to_core_server(dev_get_drvdata(x))
+
+static struct vs_session_device *
+vs_core_server_session(struct core_server *server)
+{
+	return vs_service_get_session(server->service);
+}
+
+static struct core_server *
+vs_server_session_core_server(struct vs_session_device *session)
+{
+	struct vs_service_device *core_service = session->core_service;
+
+	if (!core_service)
+		return NULL;
+
+	return dev_to_core_server(&core_service->dev);
+}
+
+static int vs_server_core_send_service_removed(struct core_server *server,
+		struct vs_service_device *service)
+{
+	return vs_server_core_core_send_service_removed(&server->state,
+			service->id, GFP_KERNEL);
+}
+
+static bool
+cancel_pending_created(struct core_server *server,
+		struct vs_service_device *service)
+{
+	struct pending_message *msg;
+
+	list_for_each_entry(msg, &server->message_queue, list) {
+		if (msg->type == VSERVICE_CORE_CORE_MSG_SERVICE_CREATED &&
+				msg->service == service) {
+			vs_put_service(msg->service);
+			list_del(&msg->list);
+			kfree(msg);
+
+			/* there can only be one */
+			return true;
+		}
+	}
+
+	return false;
+}
+
+static int vs_server_core_queue_service_removed(struct core_server *server,
+		struct vs_service_device *service)
+{
+	struct pending_message *msg;
+
+	lockdep_assert_held(&service->ready_lock);
+
+	mutex_lock(&server->message_queue_lock);
+
+	/*
+	 * If we haven't sent the notification that the service was created,
+	 * nuke it and do nothing else.
+	 *
+	 * This is not just an optimisation; see below.
+	 */
+	if (cancel_pending_created(server, service)) {
+		mutex_unlock(&server->message_queue_lock);
+		return 0;
+	}
+
+	/*
+	 * Do nothing if the core state is not connected. We must avoid
+	 * queueing service_removed messages on a reset service.
+	 *
+	 * Note that we cannot take the core server state lock here, because
+	 * we may (or may not) have been called from a core service message
+	 * handler. Thus, we must beware of races with changes to this
+	 * condition:
+	 *
+	 * - It becomes true when the req_connect handler sends an
+	 *   ack_connect, *after* it queues service_created for each existing
+	 *   service (while holding the service ready lock). The handler sends
+	 *   ack_connect with the message queue lock held.
+	 *
+	 *   - If we see the service as connected, then the req_connect
+	 *     handler has already queued and sent a service_created for this
+	 *     service, so it's ok for us to send a service_removed.
+	 *
+	 *   - If we see it as disconnected, the req_connect handler hasn't
+	 *     taken the message queue lock to send ack_connect yet, and thus
+	 *     has not released the service state lock; so if it queued a
+	 *     service_created we caught it in the flush above before it was
+	 *     sent.
+	 *
+	 * - It becomes false before the reset / disconnect handlers are
+	 *   called and those will both flush the message queue afterwards.
+	 *
+	 *   - If we see the service as connected, then the reset / disconnect
+	 *     handler is going to flush the message.
+	 *
+	 *   - If we see it disconnected, the state change has occurred and
+	 *     implicitly had the same effect as this message, so doing
+	 *     nothing is correct.
+	 *
+	 * Note that ordering in all of the above cases is guaranteed by the
+	 * message queue lock.
+	 */
+	if (!VSERVICE_CORE_STATE_IS_CONNECTED(server->state.state.core)) {
+		mutex_unlock(&server->message_queue_lock);
+		return 0;
+	}
+
+	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+	if (!msg) {
+		mutex_unlock(&server->message_queue_lock);
+		return -ENOMEM;
+	}
+
+	msg->type = VSERVICE_CORE_CORE_MSG_SERVICE_REMOVED;
+	/* put by message_queue_work */
+	msg->service = vs_get_service(service);
+
+	list_add_tail(&msg->list, &server->message_queue);
+
+	mutex_unlock(&server->message_queue_lock);
+	queue_work(server->service->work_queue, &server->message_queue_work);
+
+	return 0;
+}
+
+static int vs_server_core_send_service_created(struct core_server *server,
+		struct vs_service_device *service)
+{
+	struct vs_session_device *session =
+			vs_service_get_session(server->service);
+
+	struct vs_mbuf *mbuf;
+	struct vs_string service_name, protocol_name;
+	size_t service_name_len, protocol_name_len;
+
+	int err;
+
+	mbuf = vs_server_core_core_alloc_service_created(&server->state,
+			&service_name, &protocol_name, GFP_KERNEL);
+
+	if (IS_ERR(mbuf))
+		return PTR_ERR(mbuf);
+
+	vs_dev_debug(VS_DEBUG_SERVER, session, &session->dev,
+			"Sending service created message for %d (%s:%s)\n",
+			service->id, service->name, service->protocol);
+
+	service_name_len = strlen(service->name);
+	protocol_name_len = strlen(service->protocol);
+
+	if (service_name_len > vs_string_max_size(&service_name) ||
+			protocol_name_len > vs_string_max_size(&protocol_name)) {
+		dev_err(&session->dev,
+				"Invalid name/protocol for service %d (%s:%s)\n",
+				service->id, service->name,
+				service->protocol);
+		err = -EINVAL;
+		goto fail;
+	}
+
+	vs_string_copyin(&service_name, service->name);
+	vs_string_copyin(&protocol_name, service->protocol);
+
+	err = vs_server_core_core_send_service_created(&server->state,
+			service->id, service_name, protocol_name, mbuf);
+	if (err) {
+		dev_err(&session->dev,
+				"Fatal error sending service creation message for %d (%s:%s): %d\n",
+				service->id, service->name,
+				service->protocol, err);
+		goto fail;
+	}
+
+	return 0;
+
+fail:
+	vs_server_core_core_free_service_created(&server->state,
+			&service_name, &protocol_name, mbuf);
+
+	return err;
+}
+
+static int vs_server_core_queue_service_created(struct core_server *server,
+		struct vs_service_device *service)
+{
+	struct pending_message *msg;
+
+	lockdep_assert_held(&service->ready_lock);
+	lockdep_assert_held(&server->service->state_mutex);
+
+	mutex_lock(&server->message_queue_lock);
+
+	/*  Do nothing if the core state is disconnected.  */
+	if (!VSERVICE_CORE_STATE_IS_CONNECTED(server->state.state.core)) {
+		mutex_unlock(&server->message_queue_lock);
+		return 0;
+	}
+
+	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+	if (!msg) {
+		mutex_unlock(&server->message_queue_lock);
+		return -ENOMEM;
+	}
+
+	msg->type = VSERVICE_CORE_CORE_MSG_SERVICE_CREATED;
+	/* put by message_queue_work */
+	msg->service = vs_get_service(service);
+
+	list_add_tail(&msg->list, &server->message_queue);
+
+	mutex_unlock(&server->message_queue_lock);
+	queue_work(server->service->work_queue, &server->message_queue_work);
+
+	return 0;
+}
+
+static struct vs_service_device *
+__vs_server_core_register_service(struct vs_session_device *session,
+		vs_service_id_t service_id, struct vs_service_device *owner,
+		const char *name, const char *protocol, const void *plat_data)
+{
+	if (!session->is_server)
+		return ERR_PTR(-ENODEV);
+
+	if (!name || strnlen(name, VSERVICE_CORE_SERVICE_NAME_SIZE + 1) >
+			VSERVICE_CORE_SERVICE_NAME_SIZE || name[0] == '\n')
+		return ERR_PTR(-EINVAL);
+
+	/* The server core must only be registered as service_id zero */
+	if (service_id == 0 && (owner != NULL ||
+			strcmp(name, VSERVICE_CORE_SERVICE_NAME) != 0 ||
+			strcmp(protocol, VSERVICE_CORE_PROTOCOL_NAME) != 0))
+		return ERR_PTR(-EINVAL);
+
+	return vs_service_register(session, owner, service_id, protocol, name,
+			plat_data);
+}
+
+static struct vs_service_device *
+vs_server_core_create_service(struct core_server *server,
+		struct vs_session_device *session,
+		struct vs_service_device *owner, vs_service_id_t service_id,
+		const char *name, const char *protocol, const void *plat_data)
+{
+	struct vs_service_device *service;
+
+	service = __vs_server_core_register_service(session, service_id,
+			owner, name, protocol, plat_data);
+	if (IS_ERR(service))
+		return service;
+
+	if (protocol) {
+		vs_service_state_lock(server->service);
+		vs_service_start(service);
+		if (VSERVICE_CORE_STATE_IS_CONNECTED(server->state.state.core))
+			vs_service_enable(service);
+		vs_service_state_unlock(server->service);
+	}
+
+	return service;
+}
+
+static int
+vs_server_core_send_service_reset_ready(struct core_server *server,
+		vservice_core_message_id_t type,
+		struct vs_service_device *service)
+{
+	bool is_reset = (type == VSERVICE_CORE_CORE_MSG_SERVICE_RESET);
+	struct vs_session_device *session __maybe_unused =
+			vs_service_get_session(server->service);
+	int err;
+
+	vs_dev_debug(VS_DEBUG_SERVER, session, &session->dev,
+			"Sending %s for service %d\n",
+			is_reset ? "reset" : "ready", service->id);
+
+	if (is_reset)
+		err = vs_server_core_core_send_service_reset(&server->state,
+				service->id, GFP_KERNEL);
+	else
+		err = vs_server_core_core_send_server_ready(&server->state,
+				service->id, service->recv_quota,
+				service->send_quota,
+				service->notify_recv_offset,
+				service->notify_recv_bits,
+				service->notify_send_offset,
+				service->notify_send_bits,
+				GFP_KERNEL);
+
+	return err;
+}
+
+static bool
+cancel_pending_ready(struct core_server *server,
+		struct vs_service_device *service)
+{
+	struct pending_message *msg;
+
+	list_for_each_entry(msg, &server->message_queue, list) {
+		if (msg->type == VSERVICE_CORE_CORE_MSG_SERVER_READY &&
+				msg->service == service) {
+			vs_put_service(msg->service);
+			list_del(&msg->list);
+			kfree(msg);
+
+			/* there can only be one */
+			return true;
+		}
+	}
+
+	return false;
+}
+
+static int
+vs_server_core_queue_service_reset_ready(struct core_server *server,
+		vservice_core_message_id_t type,
+		struct vs_service_device *service)
+{
+	bool is_reset = (type == VSERVICE_CORE_CORE_MSG_SERVICE_RESET);
+	struct pending_message *msg;
+
+	mutex_lock(&server->message_queue_lock);
+
+	/*
+	 * If this is a reset, and there is an outgoing ready in the
+	 * queue, we must cancel it so it can't be sent with invalid
+	 * transport resources, and then return immediately so we
+	 * don't send a redundant reset.
+	 */
+	if (is_reset && cancel_pending_ready(server, service)) {
+		mutex_unlock(&server->message_queue_lock);
+		return VS_SERVICE_ALREADY_RESET;
+	}
+
+	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+	if (!msg) {
+		mutex_unlock(&server->message_queue_lock);
+		return -ENOMEM;
+	}
+
+	msg->type = type;
+	/* put by message_queue_work */
+	msg->service = vs_get_service(service);
+	list_add_tail(&msg->list, &server->message_queue);
+
+	mutex_unlock(&server->message_queue_lock);
+	queue_work(server->service->work_queue, &server->message_queue_work);
+
+	return 0;
+}
+
+static int vs_core_server_tx_ready(struct vs_server_core_state *state)
+{
+	struct core_server *server = to_core_server(state);
+	struct vs_session_device *session __maybe_unused =
+			vs_service_get_session(server->service);
+
+	vs_dev_debug(VS_DEBUG_SERVER, session, &session->dev, "tx_ready\n");
+
+	queue_work(server->service->work_queue, &server->message_queue_work);
+
+	return 0;
+}
+
+static void message_queue_work(struct work_struct *work)
+{
+	struct core_server *server = container_of(work, struct core_server,
+			message_queue_work);
+	struct pending_message *msg;
+	int err;
+
+	vs_service_state_lock(server->service);
+
+	if (!VSERVICE_CORE_STATE_IS_CONNECTED(server->state.state.core)) {
+		vs_service_state_unlock(server->service);
+		return;
+	}
+
+	/*
+	 * If any pending message fails we exit the loop immediately so that
+	 * we preserve the message order.
+	 */
+	mutex_lock(&server->message_queue_lock);
+	while (!list_empty(&server->message_queue)) {
+		msg = list_first_entry(&server->message_queue,
+				struct pending_message, list);
+
+		switch (msg->type) {
+		case VSERVICE_CORE_CORE_MSG_SERVICE_CREATED:
+			err = vs_server_core_send_service_created(server,
+					msg->service);
+			break;
+
+		case VSERVICE_CORE_CORE_MSG_SERVICE_REMOVED:
+			err = vs_server_core_send_service_removed(server,
+					msg->service);
+			break;
+
+		case VSERVICE_CORE_CORE_MSG_SERVICE_RESET:
+		case VSERVICE_CORE_CORE_MSG_SERVER_READY:
+			err = vs_server_core_send_service_reset_ready(
+					server, msg->type, msg->service);
+			break;
+
+		default:
+			dev_warn(&server->service->dev,
+					"Don't know how to handle pending message type %d\n",
+					msg->type);
+			err = 0;
+			break;
+		}
+
+		/*
+		 * If we're out of quota we exit and wait for tx_ready to
+		 * queue us again.
+		 */
+		if (err == -ENOBUFS)
+			break;
+
+		/* Any other error is fatal */
+		if (err < 0) {
+			dev_err(&server->service->dev,
+					"Failed to send pending message type %d: %d - resetting session",
+					msg->type, err);
+			vs_service_reset_nosync(server->service);
+			break;
+		}
+
+		/*
+		 * The message sent successfully - remove it from the
+		 * queue. The corresponding vs_get_service() was done
+		 * when the pending message was created.
+		 */
+		vs_put_service(msg->service);
+		list_del(&msg->list);
+		kfree(msg);
+	}
+	mutex_unlock(&server->message_queue_lock);
+
+	vs_service_state_unlock(server->service);
+
+	return;
+}
+
+/*
+ * Core server sysfs interface
+ */
+static ssize_t server_core_create_service_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+	struct vs_session_device *session = to_vs_session_device(dev->parent);
+	struct core_server *server = dev_to_core_server(&service->dev);
+	struct vs_service_device *new_service;
+	char *p;
+	ssize_t ret = count;
+
+	/* FIXME - Buffer sizes are not defined in generated headers */
+	/* discard leading whitespace */
+	while (count && isspace(*buf)) {
+		buf++;
+		count--;
+	}
+	if (!count) {
+		dev_info(dev, "empty service name\n");
+		return -EINVAL;
+	}
+	/* discard trailing whitespace */
+	while (count && isspace(buf[count - 1]))
+		count--;
+
+	if (count > VSERVICE_CORE_SERVICE_NAME_SIZE) {
+		dev_info(dev, "service name too long (max %d)\n", VSERVICE_CORE_SERVICE_NAME_SIZE);
+		return -EINVAL;
+	}
+
+	p = kstrndup(buf, count, GFP_KERNEL);
+
+	/*
+	 * Writing a service name to this file creates a new service. The
+	 * service is created without a protocol. It will appear in sysfs
+	 * but will not be bound to a driver until a valid protocol name
+	 * has been written to the created device's protocol sysfs attribute.
+	 */
+	new_service = vs_server_core_create_service(server, session, service,
+			VS_SERVICE_AUTO_ALLOCATE_ID, p, NULL, NULL);
+	if (IS_ERR(new_service))
+		ret = PTR_ERR(new_service);
+
+	kfree(p);
+
+	return ret;
+}
+
+static ssize_t server_core_reset_service_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct vs_service_device *core_service = to_vs_service_device(dev);
+	struct vs_session_device *session =
+		vs_service_get_session(core_service);
+	struct vs_service_device *target;
+	vs_service_id_t service_id;
+	unsigned long val;
+	int err;
+
+	/*
+	 * Writing a valid service_id to this file does a reset of that service
+	 */
+	err = kstrtoul(buf, 0, &val);
+	if (err)
+		return err;
+
+	service_id = val;
+	target = vs_session_get_service(session, service_id);
+	if (!target)
+		return -EINVAL;
+
+	err = vs_service_reset(target, core_service);
+
+	vs_put_service(target);
+	return err < 0 ? err : count;
+}
+
+static ssize_t server_core_remove_service_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+	struct vs_session_device *session = vs_service_get_session(service);
+	struct vs_service_device *target;
+	vs_service_id_t service_id;
+	unsigned long val;
+	int err;
+
+	err = kstrtoul(buf, 0, &val);
+	if (err)
+		return err;
+
+	service_id = val;
+	if (service_id == 0) {
+		/*
+		 * We don't allow removing the core service this way. The
+		 * core service will be removed when the session is removed.
+		 */
+		return -EINVAL;
+	}
+
+	target = vs_session_get_service(session, service_id);
+	if (!target)
+		return -EINVAL;
+
+	err = vs_service_delete(target, service);
+
+	vs_put_service(target);
+	return err < 0 ? err : count;
+}
+
+static DEVICE_ATTR(create_service, S_IWUSR,
+		NULL, server_core_create_service_store);
+static DEVICE_ATTR(reset_service, S_IWUSR,
+		NULL, server_core_reset_service_store);
+static DEVICE_ATTR(remove_service, S_IWUSR,
+		NULL, server_core_remove_service_store);
+
+static struct attribute *server_core_dev_attrs[] = {
+	&dev_attr_create_service.attr,
+	&dev_attr_reset_service.attr,
+	&dev_attr_remove_service.attr,
+	NULL,
+};
+
+static const struct attribute_group server_core_attr_group = {
+	.attrs = server_core_dev_attrs,
+};
+
+static int init_transport_resource_allocation(struct core_server *server)
+{
+	struct vs_session_device *session = vs_core_server_session(server);
+	struct vs_transport *transport = session->transport;
+	size_t size;
+	int err;
+
+	mutex_init(&server->alloc_lock);
+	mutex_lock(&server->alloc_lock);
+
+	transport->vt->get_quota_limits(transport, &server->out_quota_remaining,
+			&server->in_quota_remaining);
+
+	transport->vt->get_notify_bits(transport, &server->out_notify_map_bits,
+			&server->in_notify_map_bits);
+
+	size = BITS_TO_LONGS(server->in_notify_map_bits) *
+			sizeof(unsigned long);
+	server->in_notify_map = kzalloc(size, GFP_KERNEL);
+	if (server->in_notify_map_bits && !server->in_notify_map) {
+		err = -ENOMEM;
+		goto fail;
+	}
+
+	size = BITS_TO_LONGS(server->out_notify_map_bits) *
+			sizeof(unsigned long);
+	server->out_notify_map = kzalloc(size, GFP_KERNEL);
+	if (server->out_notify_map_bits && !server->out_notify_map) {
+		err = -ENOMEM;
+		goto fail_free_in_bits;
+	}
+
+	mutex_unlock(&server->alloc_lock);
+
+	return 0;
+
+fail_free_in_bits:
+	kfree(server->in_notify_map);
+fail:
+	mutex_unlock(&server->alloc_lock);
+	return err;
+}
+
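+/*
+ * alloc_quota - reserve message quota from the remaining pool.
+ *
+ * If an explicit quota was set it must fit entirely within the remaining
+ * pool; otherwise the driver's best-effort value (capped at what remains),
+ * or failing that its minimum, is used. Returns the allocated quota, or
+ * -ENOSPC if the minimum cannot be satisfied.
+ */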
+static int alloc_quota(unsigned minimum, unsigned best, unsigned set,
+		unsigned *remaining)
+{
+	unsigned quota;
+
+	if (set) {
+		quota = set;
+
+		if (quota > *remaining)
+			return -ENOSPC;
+	} else if (best) {
+		quota = min(best, *remaining);
+	} else {
+		quota = minimum;
+	}
+
+	if (quota < minimum)
+		return -ENOSPC;
+
+	*remaining -= quota;
+
+	return min_t(unsigned, quota, INT_MAX);
+}
+
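+/*
+ * alloc_notify_bits - reserve a contiguous range of notification bits.
+ *
+ * Finds and marks a free run of notify_count bits in the given bitmap and
+ * returns its offset, or -ENOSPC if no sufficiently large run exists. A
+ * count of zero succeeds with offset 0.
+ */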
+static int alloc_notify_bits(unsigned notify_count, unsigned long *map,
+		unsigned nr_bits)
+{
+	unsigned offset;
+
+	if (notify_count) {
+		offset = bitmap_find_next_zero_area(map, nr_bits, 0,
+				notify_count, 0);
+
+		if (offset >= nr_bits || offset > (unsigned)INT_MAX)
+			return -ENOSPC;
+
+		bitmap_set(map, offset, notify_count);
+	} else {
+		offset = 0;
+	}
+
+	return offset;
+}
+
+/*
+ * alloc_transport_resources - Allocates the quotas and notification bits for
+ * a service.
+ * @server: the core service state.
+ * @service: the service device to allocate resources for.
+ *
+ * This function allocates message quotas and notification bits. It is called
+ * for the core service in alloc(), and for every other service by the server
+ * bus probe() function.
+ */
+static int alloc_transport_resources(struct core_server *server,
+		struct vs_service_device *service)
+{
+	struct vs_session_device *session __maybe_unused =
+			vs_service_get_session(service);
+	unsigned in_bit_offset, out_bit_offset;
+	unsigned in_quota, out_quota;
+	int ret;
+	struct vs_service_driver *driver;
+
+	if (WARN_ON(!service->dev.driver))
+		return -ENODEV;
+
+	mutex_lock(&server->alloc_lock);
+
+	driver = to_vs_service_driver(service->dev.driver);
+
+	/* Quota allocations */
+	ret = alloc_quota(driver->in_quota_min, driver->in_quota_best,
+			service->in_quota_set, &server->in_quota_remaining);
+	if (ret < 0) {
+		dev_err(&service->dev, "cannot allocate in quota\n");
+		goto fail_in_quota;
+	}
+	in_quota = ret;
+
+	ret = alloc_quota(driver->out_quota_min, driver->out_quota_best,
+			service->out_quota_set, &server->out_quota_remaining);
+	if (ret < 0) {
+		dev_err(&service->dev, "cannot allocate out quota\n");
+		goto fail_out_quota;
+	}
+	out_quota = ret;
+
+	vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &session->dev,
+			"%d: quota in: %u out: %u; remaining in: %u out: %u\n",
+			service->id, in_quota, out_quota,
+			server->in_quota_remaining,
+			server->out_quota_remaining);
+
+	/* Notification bit allocations */
+	ret = alloc_notify_bits(service->notify_recv_bits,
+			server->in_notify_map, server->in_notify_map_bits);
+	if (ret < 0) {
+		dev_err(&service->dev, "cannot allocate in notify bits\n");
+		goto fail_in_notify;
+	}
+	in_bit_offset = ret;
+
+	ret = alloc_notify_bits(service->notify_send_bits,
+			server->out_notify_map, server->out_notify_map_bits);
+	if (ret < 0) {
+		dev_err(&service->dev, "cannot allocate out notify bits\n");
+		goto fail_out_notify;
+	}
+	out_bit_offset = ret;
+
+	vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &session->dev,
+			"notify bits in: %u/%u out: %u/%u\n",
+			in_bit_offset, service->notify_recv_bits,
+			out_bit_offset, service->notify_send_bits);
+
+	/* Fill in the device's allocations */
+	service->recv_quota = in_quota;
+	service->send_quota = out_quota;
+	service->notify_recv_offset = in_bit_offset;
+	service->notify_send_offset = out_bit_offset;
+
+	mutex_unlock(&server->alloc_lock);
+
+	return 0;
+
+fail_out_notify:
+	if (service->notify_recv_bits)
+		bitmap_clear(server->in_notify_map,
+				in_bit_offset, service->notify_recv_bits);
+fail_in_notify:
+	server->out_quota_remaining += out_quota;
+fail_out_quota:
+	server->in_quota_remaining += in_quota;
+fail_in_quota:
+
+	mutex_unlock(&server->alloc_lock);
+
+	service->recv_quota = 0;
+	service->send_quota = 0;
+	service->notify_recv_bits = 0;
+	service->notify_recv_offset = 0;
+	service->notify_send_bits = 0;
+	service->notify_send_offset = 0;
+
+	return ret;
+}
+
+/*
+ * free_transport_resources - Frees the quotas and notification bits for
+ * a non-core service.
+ * @server: the core service state.
+ * @service: the service device to free resources for.
+ *
+ * This function is called by the server to free message quotas and
+ * notification bits that were allocated by alloc_transport_resources. It must
+ * only be called when the target service is in reset, and must be called with
+ * the core service's state lock held.
+ */
+static int free_transport_resources(struct core_server *server,
+		struct vs_service_device *service)
+{
+	mutex_lock(&server->alloc_lock);
+
+	if (service->notify_recv_bits)
+		bitmap_clear(server->in_notify_map,
+				service->notify_recv_offset,
+				service->notify_recv_bits);
+
+	if (service->notify_send_bits)
+		bitmap_clear(server->out_notify_map,
+				service->notify_send_offset,
+				service->notify_send_bits);
+
+	server->in_quota_remaining += service->recv_quota;
+	server->out_quota_remaining += service->send_quota;
+
+	mutex_unlock(&server->alloc_lock);
+
+	service->recv_quota = 0;
+	service->send_quota = 0;
+	service->notify_recv_bits = 0;
+	service->notify_recv_offset = 0;
+	service->notify_send_bits = 0;
+	service->notify_send_offset = 0;
+
+	return 0;
+}
+
+static struct vs_server_core_state *
+vs_core_server_alloc(struct vs_service_device *service)
+{
+	struct core_server *server;
+	int err;
+
+	if (WARN_ON(service->id != 0))
+		goto fail;
+
+	server = kzalloc(sizeof(*server), GFP_KERNEL);
+	if (!server)
+		goto fail;
+
+	server->service = service;
+	INIT_LIST_HEAD(&server->message_queue);
+	INIT_WORK(&server->message_queue_work, message_queue_work);
+	mutex_init(&server->message_queue_lock);
+
+	err = init_transport_resource_allocation(server);
+	if (err)
+		goto fail_init_alloc;
+
+	err = alloc_transport_resources(server, service);
+	if (err)
+		goto fail_alloc_transport;
+
+	err = sysfs_create_group(&service->dev.kobj, &server_core_attr_group);
+	if (err)
+		goto fail_sysfs;
+
+	return &server->state;
+
+fail_sysfs:
+	free_transport_resources(server, service);
+fail_alloc_transport:
+	kfree(server->out_notify_map);
+	kfree(server->in_notify_map);
+fail_init_alloc:
+	kfree(server);
+fail:
+	return NULL;
+}
+
+static void vs_core_server_release(struct vs_server_core_state *state)
+{
+	struct core_server *server = to_core_server(state);
+	struct vs_session_device *session = vs_core_server_session(server);
+
+	/* Delete all the other services */
+	vs_session_delete_noncore(session);
+
+	sysfs_remove_group(&server->service->dev.kobj, &server_core_attr_group);
+	kfree(server->out_notify_map);
+	kfree(server->in_notify_map);
+	kfree(server);
+}
+
+/**
+ * vs_server_create_service - create and register a new vService server
+ * @session: the session to create the vService server on
+ * @parent: an existing service that manages the new service, or NULL to use
+ *	the core service
+ * @name: the name of the new service
+ * @protocol: the protocol for the new service
+ * @plat_data: value to be assigned to (struct device *)->platform_data
+ */
+struct vs_service_device *
+vs_server_create_service(struct vs_session_device *session,
+		struct vs_service_device *parent, const char *name,
+		const char *protocol, const void *plat_data)
+{
+	struct vs_service_device *core_service, *new_service;
+	struct core_server *server;
+
+	if (!session->is_server || !name || !protocol)
+		return NULL;
+
+	core_service = session->core_service;
+	if (!core_service)
+		return NULL;
+
+	device_lock(&core_service->dev);
+	if (!core_service->dev.driver) {
+		device_unlock(&core_service->dev);
+		return NULL;
+	}
+
+	server = dev_to_core_server(&core_service->dev);
+
+	if (!parent)
+		parent = core_service;
+
+	new_service = vs_server_core_create_service(server, session, parent,
+			VS_SERVICE_AUTO_ALLOCATE_ID, name, protocol, plat_data);
+
+	device_unlock(&core_service->dev);
+
+	if (IS_ERR(new_service))
+		return NULL;
+
+	return new_service;
+}
+EXPORT_SYMBOL(vs_server_create_service);
+
+/**
+ * vs_server_destroy_service - destroy and unregister a vService server
+ * @service: the service to destroy
+ * @parent: the service managing @service, or NULL to use the core service
+ *
+ * This function must _not_ be called from the target service's own workqueue.
+ */
+int vs_server_destroy_service(struct vs_service_device *service,
+		struct vs_service_device *parent)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+
+	if (!session->is_server || service->id == 0)
+		return -EINVAL;
+
+	if (!parent)
+		parent = session->core_service;
+
+	return vs_service_delete(service, parent);
+}
+EXPORT_SYMBOL(vs_server_destroy_service);
+
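+/*
+ * Illustrative sketch only: a server driver creating and later destroying a
+ * child service with the two helpers above. The service name and protocol
+ * string are placeholders, not values defined anywhere in this code.
+ *
+ *	struct vs_service_device *child;
+ *
+ *	child = vs_server_create_service(session, NULL, "example-service",
+ *			"example.protocol", NULL);
+ *	if (!child)
+ *		return -ENODEV;
+ *	...
+ *	vs_server_destroy_service(child, NULL);
+ */
+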
+static void __queue_service_created(struct vs_service_device *service,
+		void *data)
+{
+	struct core_server *server = (struct core_server *)data;
+
+	vs_server_core_queue_service_created(server, service);
+}
+
+static int vs_server_core_handle_connect(struct vs_server_core_state *state)
+{
+	struct core_server *server = to_core_server(state);
+	struct vs_session_device *session = vs_core_server_session(server);
+	int err;
+
+	/* Tell the other end that we've finished connecting. */
+	err = vs_server_core_core_send_ack_connect(state, GFP_KERNEL);
+	if (err)
+		return err;
+
+	/* Queue a service-created message for each existing service. */
+	vs_session_for_each_service(session, __queue_service_created, server);
+
+	/* Re-enable all the services. */
+	vs_session_enable_noncore(session);
+
+	return 0;
+}
+
+static void vs_core_server_disable_services(struct core_server *server)
+{
+	struct vs_session_device *session = vs_core_server_session(server);
+	struct pending_message *msg;
+
+	/* Disable all the other services */
+	vs_session_disable_noncore(session);
+
+	/* Flush all the pending service-readiness messages */
+	mutex_lock(&server->message_queue_lock);
+	while (!list_empty(&server->message_queue)) {
+		msg = list_first_entry(&server->message_queue,
+				struct pending_message, list);
+		vs_put_service(msg->service);
+		list_del(&msg->list);
+		kfree(msg);
+	}
+	mutex_unlock(&server->message_queue_lock);
+}
+
+static int vs_server_core_handle_disconnect(struct vs_server_core_state *state)
+{
+	struct core_server *server = to_core_server(state);
+
+	vs_core_server_disable_services(server);
+
+	return vs_server_core_core_send_ack_disconnect(state, GFP_KERNEL);
+}
+
+static int
+vs_server_core_handle_service_reset(struct vs_server_core_state *state,
+		unsigned service_id)
+{
+	struct core_server *server = to_core_server(state);
+	struct vs_session_device *session = vs_core_server_session(server);
+
+	if (service_id == 0)
+		return -EPROTO;
+
+	return vs_service_handle_reset(session, service_id, false);
+}
+
+static void vs_core_server_start(struct vs_server_core_state *state)
+{
+	struct core_server *server = to_core_server(state);
+	struct vs_session_device *session = vs_core_server_session(server);
+	int err;
+
+	vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &server->service->dev,
+			"Core server start\n");
+
+	err = vs_server_core_core_send_startup(&server->state,
+			server->service->recv_quota,
+			server->service->send_quota, GFP_KERNEL);
+
+	if (err)
+		dev_err(&session->dev, "Failed to start core protocol: %d\n",
+				err);
+}
+
+static void vs_core_server_reset(struct vs_server_core_state *state)
+{
+	struct core_server *server = to_core_server(state);
+	struct vs_session_device *session = vs_core_server_session(server);
+
+	vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &server->service->dev,
+			"Core server reset\n");
+
+	vs_core_server_disable_services(server);
+}
+
+static struct vs_server_core vs_core_server_driver = {
+	.alloc		= vs_core_server_alloc,
+	.release	= vs_core_server_release,
+	.start		= vs_core_server_start,
+	.reset		= vs_core_server_reset,
+	.tx_ready	= vs_core_server_tx_ready,
+	.core = {
+		.req_connect		= vs_server_core_handle_connect,
+		.req_disconnect		= vs_server_core_handle_disconnect,
+		.msg_service_reset	= vs_server_core_handle_service_reset,
+	},
+};
+
+/*
+ * Server bus driver
+ */
+static int vs_server_bus_match(struct device *dev, struct device_driver *driver)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+	struct vs_service_driver *vsdrv = to_vs_service_driver(driver);
+
+	/* Don't match anything to the devio driver; it's bound manually */
+	if (!vsdrv->protocol)
+		return 0;
+
+	WARN_ON_ONCE(!service->is_server || !vsdrv->is_server);
+
+	/* Don't match anything that doesn't have a protocol set yet */
+	if (!service->protocol)
+		return 0;
+
+	if (strcmp(service->protocol, vsdrv->protocol) == 0)
+		return 1;
+
+	return 0;
+}
+
+static int vs_server_bus_probe(struct device *dev)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+	struct vs_session_device *session = vs_service_get_session(service);
+	struct core_server *server = vs_server_session_core_server(session);
+	int ret;
+
+	/*
+	 * Set the notify counts for the service, unless the driver is the
+	 * devio driver in which case it has already been done by the devio
+	 * bind ioctl. The devio driver cannot be bound automatically.
+	 */
+	struct vs_service_driver *driver =
+		to_vs_service_driver(service->dev.driver);
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+	if (driver != &vs_devio_server_driver)
+#endif
+	{
+		service->notify_recv_bits = driver->in_notify_count;
+		service->notify_send_bits = driver->out_notify_count;
+	}
+
+	/*
+	 * We can't allocate transport resources here for the core service
+	 * because the resource pool doesn't exist yet. It's done in alloc()
+	 * instead (which is called, indirectly, by vs_service_bus_probe()).
+	 */
+	if (service->id == 0)
+		return vs_service_bus_probe(dev);
+
+	if (!server)
+		return -ENODEV;
+	ret = alloc_transport_resources(server, service);
+	if (ret < 0)
+		goto fail;
+
+	ret = vs_service_bus_probe(dev);
+	if (ret < 0)
+		goto fail_free_resources;
+
+	return 0;
+
+fail_free_resources:
+	free_transport_resources(server, service);
+fail:
+	return ret;
+}
+
+static int vs_server_bus_remove(struct device *dev)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+	struct vs_session_device *session = vs_service_get_session(service);
+	struct core_server *server = vs_server_session_core_server(session);
+
+	vs_service_bus_remove(dev);
+
+	/*
+	 * We skip free_transport_resources for the core service because the
+	 * resource pool has already been freed at this point. It's also
+	 * possible that the core service has disappeared, in which case
+	 * there's no work to do here.
+	 */
+	if (server != NULL && service->id != 0)
+		free_transport_resources(server, service);
+
+	return 0;
+}
+
+static ssize_t is_server_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", service->is_server);
+}
+
+static ssize_t id_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", service->id);
+}
+
+static ssize_t dev_protocol_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n", service->protocol ?: "");
+}
+
+struct service_enable_work_struct {
+	struct vs_service_device *service;
+	struct work_struct work;
+};
+
+static void service_enable_work(struct work_struct *work)
+{
+	struct service_enable_work_struct *enable_work = container_of(work,
+			struct service_enable_work_struct, work);
+	struct vs_service_device *service = enable_work->service;
+	struct vs_session_device *session = vs_service_get_session(service);
+	struct core_server *server = vs_server_session_core_server(session);
+	bool started;
+	int ret;
+
+	kfree(enable_work);
+
+	if (!server)
+		return;
+	/* Start and enable the service */
+	vs_service_state_lock(server->service);
+	started = vs_service_start(service);
+	if (!started) {
+		vs_service_state_unlock(server->service);
+		vs_put_service(service);
+		return;
+	}
+
+	if (VSERVICE_CORE_STATE_IS_CONNECTED(server->state.state.core))
+		vs_service_enable(service);
+	vs_service_state_unlock(server->service);
+
+	/* Tell the bus to search for a driver that supports the protocol */
+	ret = device_attach(&service->dev);
+	if (ret == 0)
+		dev_warn(&service->dev, "No driver found for protocol: %s\n",
+				service->protocol);
+	kobject_uevent(&service->dev.kobj, KOBJ_CHANGE);
+
+	/* The corresponding vs_get_service was done when the work was queued */
+	vs_put_service(service);
+}
+
+static ssize_t dev_protocol_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+	struct service_enable_work_struct *enable_work;
+
+	/* The protocol can only be set once */
+	if (service->protocol)
+		return -EPERM;
+
+	/* Registering additional core servers is not allowed */
+	if (strcmp(buf, VSERVICE_CORE_PROTOCOL_NAME) == 0)
+		return -EINVAL;
+
+	if (strnlen(buf, VSERVICE_CORE_PROTOCOL_NAME_SIZE) + 1 >
+			VSERVICE_CORE_PROTOCOL_NAME_SIZE)
+		return -E2BIG;
+
+	enable_work = kmalloc(sizeof(*enable_work), GFP_KERNEL);
+	if (!enable_work)
+		return -ENOMEM;
+
+	/* Set the protocol and tell the client about it */
+	service->protocol = kstrdup(buf, GFP_KERNEL);
+	if (!service->protocol) {
+		kfree(enable_work);
+		return -ENOMEM;
+	}
+	strim(service->protocol);
+
+	/*
+	 * Schedule work to enable the service. We can't do it here because
+	 * we need to take the core service lock, and doing that here makes
+	 * it depend circularly on this sysfs attribute, which can be deleted
+	 * with that lock held.
+	 *
+	 * The corresponding vs_put_service is called in the enable_work
+	 * function.
+	 */
+	INIT_WORK(&enable_work->work, service_enable_work);
+	enable_work->service = vs_get_service(service);
+	schedule_work(&enable_work->work);
+
+	return count;
+}
+
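+/*
+ * For illustration only: the protocol attribute above is normally written
+ * once from userspace to activate a newly created service, e.g.
+ *
+ *	echo -n "<protocol-name>" > \
+ *		/sys/bus/vservices-server/devices/<service>/protocol
+ *
+ * The device directory name is an assumption here; it depends on how the
+ * service device was named when it was registered.
+ */
+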
+static ssize_t service_name_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n", service->name);
+}
+
+static ssize_t quota_in_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+	struct vs_session_device *session = vs_service_get_session(service);
+	struct core_server *server = vs_server_session_core_server(session);
+	int ret;
+	unsigned long in_quota;
+
+	if (!server)
+		return -ENODEV;
+	/*
+	 * Don't allow quota to be changed for services that have a driver
+	 * bound. We take the alloc lock here because the device lock is held
+	 * while creating and destroying this sysfs item. This means we can
+	 * race with driver binding, but that doesn't matter: we actually just
+	 * want to know that alloc_transport_resources() hasn't run yet, and
+	 * that takes the alloc lock.
+	 */
+	mutex_lock(&server->alloc_lock);
+	if (service->dev.driver) {
+		ret = -EPERM;
+		goto out;
+	}
+
+	ret = kstrtoul(buf, 0, &in_quota);
+	if (ret < 0)
+		goto out;
+
+	service->in_quota_set = in_quota;
+	ret = count;
+
+out:
+	mutex_unlock(&server->alloc_lock);
+
+	return ret;
+}
+
+static ssize_t quota_in_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n", service->recv_quota);
+}
+
+static ssize_t quota_out_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+	struct vs_session_device *session = vs_service_get_session(service);
+	struct core_server *server = vs_server_session_core_server(session);
+	int ret;
+	unsigned long out_quota;
+
+	if (!server)
+		return -ENODEV;
+	/* See comment in quota_in_store. */
+	mutex_lock(&server->alloc_lock);
+	if (service->dev.driver) {
+		ret = -EPERM;
+		goto out;
+	}
+
+	ret = kstrtoul(buf, 0, &out_quota);
+	if (ret < 0)
+		goto out;
+
+	service->out_quota_set = out_quota;
+	ret = count;
+
+out:
+	mutex_unlock(&server->alloc_lock);
+
+	return ret;
+}
+
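+/*
+ * For illustration only: because the quotas can only be changed while no
+ * driver is bound, userspace would typically set them before writing the
+ * protocol attribute (paths assumed, as above):
+ *
+ *	echo 16 > /sys/bus/vservices-server/devices/<service>/quota_in
+ *	echo 16 > /sys/bus/vservices-server/devices/<service>/quota_out
+ */
+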
+static ssize_t quota_out_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n", service->send_quota);
+}
+
+static struct device_attribute vs_server_dev_attrs[] = {
+	__ATTR_RO(id),
+	__ATTR_RO(is_server),
+	__ATTR(protocol, S_IRUGO | S_IWUSR,
+			dev_protocol_show, dev_protocol_store),
+	__ATTR_RO(service_name),
+	__ATTR(quota_in, S_IRUGO | S_IWUSR,
+			quota_in_show, quota_in_store),
+	__ATTR(quota_out, S_IRUGO | S_IWUSR,
+			quota_out_show, quota_out_store),
+	__ATTR_NULL
+};
+
+static ssize_t protocol_show(struct device_driver *drv, char *buf)
+{
+	struct vs_service_driver *vsdrv = to_vs_service_driver(drv);
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n", vsdrv->protocol);
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)
+static struct driver_attribute vs_server_drv_attrs[] = {
+	__ATTR_RO(protocol),
+	__ATTR_NULL
+};
+#else
+static DRIVER_ATTR_RO(protocol);
+
+static struct attribute *vs_server_drv_attrs[] = {
+	&driver_attr_protocol.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(vs_server_drv);
+#endif
+
+struct bus_type vs_server_bus_type = {
+	.name		= "vservices-server",
+	.dev_attrs	= vs_server_dev_attrs,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)
+	.drv_attrs	= vs_server_drv_attrs,
+#else
+	.drv_groups	= vs_server_drv_groups,
+#endif
+	.match		= vs_server_bus_match,
+	.probe		= vs_server_bus_probe,
+	.remove		= vs_server_bus_remove,
+	.uevent		= vs_service_bus_uevent,
+};
+EXPORT_SYMBOL(vs_server_bus_type);
+
+/*
+ * Server session driver
+ */
+static int vs_server_session_probe(struct device *dev)
+{
+	struct vs_session_device *session = to_vs_session_device(dev);
+	struct vs_service_device *service;
+
+	service = __vs_server_core_register_service(session, 0, NULL,
+			VSERVICE_CORE_SERVICE_NAME,
+			VSERVICE_CORE_PROTOCOL_NAME, NULL);
+	if (IS_ERR(service))
+		return PTR_ERR(service);
+
+	return 0;
+}
+
+static int
+vs_server_session_service_added(struct vs_session_device *session,
+		struct vs_service_device *service)
+{
+	struct core_server *server = vs_server_session_core_server(session);
+	int err;
+
+	if (WARN_ON(!server || !service->id))
+		return -EINVAL;
+
+	err = vs_server_core_queue_service_created(server, service);
+
+	if (err)
+		vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &session->dev,
+				"failed to send service_created: %d\n", err);
+
+	return err;
+}
+
+static int
+vs_server_session_service_start(struct vs_session_device *session,
+		struct vs_service_device *service)
+{
+	struct core_server *server = vs_server_session_core_server(session);
+	int err;
+
+	if (WARN_ON(!server || !service->id))
+		return -EINVAL;
+
+	err = vs_server_core_queue_service_reset_ready(server,
+			VSERVICE_CORE_CORE_MSG_SERVER_READY, service);
+
+	if (err)
+		vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &session->dev,
+				"failed to send server_ready: %d\n", err);
+
+	return err;
+}
+
+static int
+vs_server_session_service_local_reset(struct vs_session_device *session,
+		struct vs_service_device *service)
+{
+	struct core_server *server = vs_server_session_core_server(session);
+	int err;
+
+	if (WARN_ON(!server || !service->id))
+		return -EINVAL;
+
+	err = vs_server_core_queue_service_reset_ready(server,
+			VSERVICE_CORE_CORE_MSG_SERVICE_RESET, service);
+
+	if (err)
+		vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &session->dev,
+				"failed to send service_reset: %d\n", err);
+
+	return err;
+}
+
+static int
+vs_server_session_service_removed(struct vs_session_device *session,
+		struct vs_service_device *service)
+{
+	struct core_server *server = vs_server_session_core_server(session);
+	int err;
+
+	/*
+	 * It's possible for the core server to be forcibly removed before
+	 * the other services, for example when the underlying transport
+	 * vanishes. If that happens, we can end up here with a NULL core
+	 * server pointer.
+	 */
+	if (!server)
+		return 0;
+
+	if (WARN_ON(!service->id))
+		return -EINVAL;
+
+	err = vs_server_core_queue_service_removed(server, service);
+	if (err)
+		vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &session->dev,
+				"failed to send service_removed: %d\n", err);
+
+	return err;
+}
+
+static struct vs_session_driver vs_server_session_driver = {
+	.driver	= {
+		.name			= "vservices-server-session",
+		.owner			= THIS_MODULE,
+		.bus			= &vs_session_bus_type,
+		.probe			= vs_server_session_probe,
+		.suppress_bind_attrs	= true,
+	},
+	.is_server		= true,
+	.service_bus		= &vs_server_bus_type,
+	.service_added		= vs_server_session_service_added,
+	.service_start		= vs_server_session_service_start,
+	.service_local_reset	= vs_server_session_service_local_reset,
+	.service_removed	= vs_server_session_service_removed,
+};
+
+static int __init vs_core_server_init(void)
+{
+	int ret;
+
+	ret = bus_register(&vs_server_bus_type);
+	if (ret)
+		goto fail_bus_register;
+
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+	vs_devio_server_driver.driver.bus = &vs_server_bus_type;
+	vs_devio_server_driver.driver.owner = THIS_MODULE;
+	ret = driver_register(&vs_devio_server_driver.driver);
+	if (ret)
+		goto fail_devio_register;
+#endif
+
+	ret = driver_register(&vs_server_session_driver.driver);
+	if (ret)
+		goto fail_driver_register;
+
+	ret = vservice_core_server_register(&vs_core_server_driver,
+			"vs_core_server");
+	if (ret)
+		goto fail_core_register;
+
+	vservices_server_root = kobject_create_and_add("server-sessions",
+			vservices_root);
+	if (!vservices_server_root) {
+		ret = -ENOMEM;
+		goto fail_create_root;
+	}
+
+	return 0;
+
+fail_create_root:
+	vservice_core_server_unregister(&vs_core_server_driver);
+fail_core_register:
+	driver_unregister(&vs_server_session_driver.driver);
+fail_driver_register:
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+	driver_unregister(&vs_devio_server_driver.driver);
+	vs_devio_server_driver.driver.bus = NULL;
+	vs_devio_server_driver.driver.owner = NULL;
+fail_devio_register:
+#endif
+	bus_unregister(&vs_server_bus_type);
+fail_bus_register:
+	return ret;
+}
+
+static void __exit vs_core_server_exit(void)
+{
+	kobject_put(vservices_server_root);
+	vservice_core_server_unregister(&vs_core_server_driver);
+	driver_unregister(&vs_server_session_driver.driver);
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+	driver_unregister(&vs_devio_server_driver.driver);
+	vs_devio_server_driver.driver.bus = NULL;
+	vs_devio_server_driver.driver.owner = NULL;
+#endif
+	bus_unregister(&vs_server_bus_type);
+}
+
+subsys_initcall(vs_core_server_init);
+module_exit(vs_core_server_exit);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Core Server Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/debug.h b/drivers/vservices/debug.h
new file mode 100644
index 0000000..b379b04
--- /dev/null
+++ b/drivers/vservices/debug.h
@@ -0,0 +1,74 @@
+/*
+ * drivers/vservices/debug.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Debugging macros and support functions for Virtual Services.
+ */
+#ifndef _VSERVICES_DEBUG_H
+#define _VSERVICES_DEBUG_H
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 38)
+#include <linux/printk.h>
+#else
+#ifndef no_printk
+#define no_printk(format, args...) do { } while (0)
+#endif
+#endif
+
+#include <vservices/session.h>
+#include "transport.h"
+
+#define VS_DEBUG_TRANSPORT		(1 << 0)
+#define VS_DEBUG_TRANSPORT_MESSAGES	(1 << 1)
+#define VS_DEBUG_SESSION		(1 << 2)
+#define VS_DEBUG_CLIENT			(1 << 3)
+#define VS_DEBUG_CLIENT_CORE		(1 << 4)
+#define VS_DEBUG_SERVER			(1 << 5)
+#define VS_DEBUG_SERVER_CORE		(1 << 6)
+#define VS_DEBUG_PROTOCOL		(1 << 7)
+#define VS_DEBUG_ALL			0xff
+
+#ifdef CONFIG_VSERVICES_DEBUG
+
+#define vs_debug(type, session, format, args...)			\
+	do {								\
+		if ((session)->debug_mask & (type))			\
+			dev_dbg(&(session)->dev, format, ##args);	\
+	} while (0)
+
+#define vs_dev_debug(type, session, dev, format, args...)		\
+	do {								\
+		if ((session)->debug_mask & (type))			\
+			dev_dbg(dev, format, ##args);			\
+	} while (0)
+
+static inline void vs_debug_dump_mbuf(struct vs_session_device *session,
+		struct vs_mbuf *mbuf)
+{
+	if (session->debug_mask & VS_DEBUG_TRANSPORT_MESSAGES)
+		print_hex_dump_bytes("msg:", DUMP_PREFIX_OFFSET,
+				mbuf->data, mbuf->size);
+}
+
+#else
+
+/* Dummy versions: Use no_printk to retain type/format string checking */
+#define vs_debug(type, session, format, args...) \
+	do { (void)session; no_printk(format, ##args); } while (0)
+
+#define vs_dev_debug(type, session, dev, format, args...) \
+	do { (void)session; (void)dev; no_printk(format, ##args); } while (0)
+
+static inline void vs_debug_dump_mbuf(struct vs_session_device *session,
+		struct vs_mbuf *mbuf) {}
+
+#endif /* CONFIG_VSERVICES_DEBUG */
+
+#endif /* _VSERVICES_DEBUG_H */
diff --git a/drivers/vservices/devio.c b/drivers/vservices/devio.c
new file mode 100644
index 0000000..b3ed4ab
--- /dev/null
+++ b/drivers/vservices/devio.c
@@ -0,0 +1,1059 @@
+/*
+ * devio.c - cdev I/O for service devices
+ *
+ * Copyright (c) 2016 Cog Systems Pty Ltd
+ *     Author: Philip Derrin <philip@cog.systems>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/cdev.h>
+#include <linux/pagemap.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <linux/atomic.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/uio.h>
+#include <linux/uaccess.h>
+#include <linux/poll.h>
+#include <linux/security.h>
+#include <linux/compat.h>
+
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/transport.h>
+#include <vservices/session.h>
+#include <vservices/service.h>
+#include <vservices/ioctl.h>
+#include "session.h"
+
+#define VSERVICES_DEVICE_MAX (VS_MAX_SERVICES * VS_MAX_SESSIONS)
+
+struct vs_devio_priv {
+	struct kref kref;
+	bool running, reset;
+
+	/* Receive queue */
+	wait_queue_head_t recv_wq;
+	atomic_t notify_pending;
+	struct list_head recv_queue;
+};
+
+static void
+vs_devio_priv_free(struct kref *kref)
+{
+	struct vs_devio_priv *priv = container_of(kref, struct vs_devio_priv,
+			kref);
+
+	WARN_ON(priv->running);
+	WARN_ON(!list_empty_careful(&priv->recv_queue));
+	WARN_ON(waitqueue_active(&priv->recv_wq));
+
+	kfree(priv);
+}
+
+static void vs_devio_priv_put(struct vs_devio_priv *priv)
+{
+	kref_put(&priv->kref, vs_devio_priv_free);
+}
+
+static int
+vs_devio_service_probe(struct vs_service_device *service)
+{
+	struct vs_devio_priv *priv;
+
+	priv = kmalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	kref_init(&priv->kref);
+	priv->running = false;
+	priv->reset = false;
+	init_waitqueue_head(&priv->recv_wq);
+	atomic_set(&priv->notify_pending, 0);
+	INIT_LIST_HEAD(&priv->recv_queue);
+
+	dev_set_drvdata(&service->dev, priv);
+
+	wake_up(&service->quota_wq);
+
+	return 0;
+}
+
+static int
+vs_devio_service_remove(struct vs_service_device *service)
+{
+	struct vs_devio_priv *priv = dev_get_drvdata(&service->dev);
+
+	WARN_ON(priv->running);
+	WARN_ON(!list_empty_careful(&priv->recv_queue));
+	WARN_ON(waitqueue_active(&priv->recv_wq));
+
+	vs_devio_priv_put(priv);
+
+	return 0;
+}
+
+static int
+vs_devio_service_receive(struct vs_service_device *service,
+		struct vs_mbuf *mbuf)
+{
+	struct vs_devio_priv *priv = dev_get_drvdata(&service->dev);
+
+	WARN_ON(!priv->running);
+
+	spin_lock(&priv->recv_wq.lock);
+	list_add_tail(&mbuf->queue, &priv->recv_queue);
+	wake_up_locked(&priv->recv_wq);
+	spin_unlock(&priv->recv_wq.lock);
+
+	return 0;
+}
+
+static void
+vs_devio_service_notify(struct vs_service_device *service, u32 flags)
+{
+	struct vs_devio_priv *priv = dev_get_drvdata(&service->dev);
+	int old, cur;
+
+	WARN_ON(!priv->running);
+
+	if (!flags)
+		return;
+
+	/* open-coded atomic_or() */
+	cur = atomic_read(&priv->notify_pending);
+	while ((old = atomic_cmpxchg(&priv->notify_pending,
+					cur, cur | flags)) != cur)
+		cur = old;
+
+	wake_up(&priv->recv_wq);
+}
+
+static void
+vs_devio_service_start(struct vs_service_device *service)
+{
+	struct vs_devio_priv *priv = dev_get_drvdata(&service->dev);
+
+	if (!priv->reset) {
+		WARN_ON(priv->running);
+		priv->running = true;
+		wake_up(&service->quota_wq);
+	}
+}
+
+static void
+vs_devio_service_reset(struct vs_service_device *service)
+{
+	struct vs_devio_priv *priv = dev_get_drvdata(&service->dev);
+	struct vs_mbuf *mbuf, *tmp;
+
+	WARN_ON(!priv->running && !priv->reset);
+
+	/*
+	 * Mark the service as being in reset. This flag can never be cleared
+	 * on an open device; the user must acknowledge the reset by closing
+	 * and reopening the device.
+	 */
+	priv->reset = true;
+	priv->running = false;
+
+	spin_lock_irq(&priv->recv_wq.lock);
+	list_for_each_entry_safe(mbuf, tmp, &priv->recv_queue, queue)
+		vs_service_free_mbuf(service, mbuf);
+	INIT_LIST_HEAD(&priv->recv_queue);
+	spin_unlock_irq(&priv->recv_wq.lock);
+	wake_up_all(&priv->recv_wq);
+}
+
+/*
+ * This driver will be registered by the core server module, which must also
+ * set its bus and owner pointers.
+ */
+struct vs_service_driver vs_devio_server_driver = {
+	/* No protocol, so the normal bus match will never bind this. */
+	.protocol	= NULL,
+	.is_server	= true,
+	.rx_atomic	= true,
+
+	.probe		= vs_devio_service_probe,
+	.remove		= vs_devio_service_remove,
+	.receive	= vs_devio_service_receive,
+	.notify		= vs_devio_service_notify,
+	.start		= vs_devio_service_start,
+	.reset		= vs_devio_service_reset,
+
+	/*
+	 * Set reasonable default quotas. These can be overridden by passing
+	 * nonzero values to IOCTL_VS_BIND_SERVER, which will set the
+	 * service's *_quota_set fields.
+	 */
+	.in_quota_min	= 1,
+	.in_quota_best	= 8,
+	.out_quota_min	= 1,
+	.out_quota_best	= 8,
+
+	/* Mark the notify counts as invalid; the service's values are used. */
+	.in_notify_count = (unsigned)-1,
+	.out_notify_count = (unsigned)-1,
+
+	.driver		= {
+		.name			= "vservices-server-devio",
+		.owner			= NULL, /* set by core server */
+		.bus			= NULL, /* set by core server */
+		.suppress_bind_attrs	= true, /* see vs_devio_poll */
+	},
+};
+EXPORT_SYMBOL_GPL(vs_devio_server_driver);
+
+static int
+vs_devio_bind_server(struct vs_service_device *service,
+		struct vs_ioctl_bind *bind)
+{
+	int ret = -ENODEV;
+
+	/* Ensure the server module is loaded and the driver is registered. */
+	if (!try_module_get(vs_devio_server_driver.driver.owner))
+		goto fail_module_get;
+
+	device_lock(&service->dev);
+	ret = -EBUSY;
+	if (service->dev.driver != NULL)
+		goto fail_device_unbound;
+
+	/* Set up the quota and notify counts. */
+	service->in_quota_set = bind->recv_quota;
+	service->out_quota_set = bind->send_quota;
+	service->notify_send_bits = bind->send_notify_bits;
+	service->notify_recv_bits = bind->recv_notify_bits;
+
+	/* Manually probe the driver. */
+	service->dev.driver = &vs_devio_server_driver.driver;
+	ret = service->dev.bus->probe(&service->dev);
+	if (ret < 0)
+		goto fail_probe_driver;
+
+	ret = device_bind_driver(&service->dev);
+	if (ret < 0)
+		goto fail_bind_driver;
+
+	/* Pass the allocated quotas back to the user. */
+	bind->recv_quota = service->recv_quota;
+	bind->send_quota = service->send_quota;
+	bind->msg_size = vs_service_max_mbuf_size(service);
+
+	device_unlock(&service->dev);
+	module_put(vs_devio_server_driver.driver.owner);
+
+	return 0;
+
+fail_bind_driver:
+	ret = service->dev.bus->remove(&service->dev);
+fail_probe_driver:
+	service->dev.driver = NULL;
+fail_device_unbound:
+	device_unlock(&service->dev);
+	module_put(vs_devio_server_driver.driver.owner);
+fail_module_get:
+	return ret;
+}
+
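+/*
+ * Illustrative userspace sketch (assumptions only, not defined by this
+ * code): binding the devio server driver to a service and reading back the
+ * allocated quotas. The device path is hypothetical; the ioctl number and
+ * structure layout come from <vservices/ioctl.h>.
+ *
+ *	struct vs_ioctl_bind bind = { 0 };	// zero = use driver defaults
+ *	int fd = open("/dev/vservices/<service>", O_RDWR);
+ *
+ *	if (fd >= 0 && ioctl(fd, IOCTL_VS_BIND_SERVER, &bind) == 0)
+ *		printf("recv quota %u, send quota %u, max msg %zu\n",
+ *				bind.recv_quota, bind.send_quota,
+ *				(size_t)bind.msg_size);
+ */
+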
+/*
+ * This driver will be registered by the core client module, which must also
+ * set its bus and owner pointers.
+ */
+struct vs_service_driver vs_devio_client_driver = {
+	/* No protocol, so the normal bus match will never bind this. */
+	.protocol	= NULL,
+	.is_server	= false,
+	.rx_atomic	= true,
+
+	.probe		= vs_devio_service_probe,
+	.remove		= vs_devio_service_remove,
+	.receive	= vs_devio_service_receive,
+	.notify		= vs_devio_service_notify,
+	.start		= vs_devio_service_start,
+	.reset		= vs_devio_service_reset,
+
+	.driver		= {
+		.name			= "vservices-client-devio",
+		.owner			= NULL, /* set by core client */
+		.bus			= NULL, /* set by core client */
+		.suppress_bind_attrs	= true, /* see vs_devio_poll */
+	},
+};
+EXPORT_SYMBOL_GPL(vs_devio_client_driver);
+
+static int
+vs_devio_bind_client(struct vs_service_device *service,
+		struct vs_ioctl_bind *bind)
+{
+	int ret = -ENODEV;
+
+	/* Ensure the client module is loaded and the driver is registered. */
+	if (!try_module_get(vs_devio_client_driver.driver.owner))
+		goto fail_module_get;
+
+	device_lock(&service->dev);
+	ret = -EBUSY;
+	if (service->dev.driver != NULL)
+		goto fail_device_unbound;
+
+	/* Manually probe the driver. */
+	service->dev.driver = &vs_devio_client_driver.driver;
+	ret = service->dev.bus->probe(&service->dev);
+	if (ret < 0)
+		goto fail_probe_driver;
+
+	ret = device_bind_driver(&service->dev);
+	if (ret < 0)
+		goto fail_bind_driver;
+
+	/* Pass the allocated quotas back to the user. */
+	bind->recv_quota = service->recv_quota;
+	bind->send_quota = service->send_quota;
+	bind->msg_size = vs_service_max_mbuf_size(service);
+	bind->send_notify_bits = service->notify_send_bits;
+	bind->recv_notify_bits = service->notify_recv_bits;
+
+	device_unlock(&service->dev);
+	module_put(vs_devio_client_driver.driver.owner);
+
+	return 0;
+
+fail_bind_driver:
+	ret = service->dev.bus->remove(&service->dev);
+fail_probe_driver:
+	service->dev.driver = NULL;
+fail_device_unbound:
+	device_unlock(&service->dev);
+	module_put(vs_devio_client_driver.driver.owner);
+fail_module_get:
+	return ret;
+}
+
+static struct vs_devio_priv *
+vs_devio_priv_get_from_service(struct vs_service_device *service)
+{
+	struct vs_devio_priv *priv = NULL;
+	struct device_driver *drv;
+
+	if (!service)
+		return NULL;
+
+	device_lock(&service->dev);
+	drv = service->dev.driver;
+
+	if ((drv == &vs_devio_client_driver.driver) ||
+			(drv == &vs_devio_server_driver.driver)) {
+		vs_service_state_lock(service);
+		priv = dev_get_drvdata(&service->dev);
+		if (priv)
+			kref_get(&priv->kref);
+		vs_service_state_unlock(service);
+	}
+
+	device_unlock(&service->dev);
+
+	return priv;
+}
+
+static int
+vs_devio_open(struct inode *inode, struct file *file)
+{
+	struct vs_service_device *service;
+
+	if (imajor(inode) != vservices_cdev_major)
+		return -ENODEV;
+
+	service = vs_service_lookup_by_devt(inode->i_rdev);
+	if (!service)
+		return -ENODEV;
+
+	file->private_data = service;
+
+	return 0;
+}
+
+static int
+vs_devio_release(struct inode *inode, struct file *file)
+{
+	struct vs_service_device *service = file->private_data;
+
+	if (service) {
+		struct vs_devio_priv *priv =
+			vs_devio_priv_get_from_service(service);
+
+		if (priv) {
+			device_release_driver(&service->dev);
+			vs_devio_priv_put(priv);
+		}
+
+		file->private_data = NULL;
+		vs_put_service(service);
+	}
+
+	return 0;
+}
+
+static struct iovec *
+vs_devio_check_iov(struct vs_ioctl_iovec *io, bool is_send, ssize_t *total)
+{
+	struct iovec *iov;
+	unsigned i;
+	int ret;
+
+	if (io->iovcnt > UIO_MAXIOV)
+		return ERR_PTR(-EINVAL);
+
+	iov = kmalloc(sizeof(*iov) * io->iovcnt, GFP_KERNEL);
+	if (!iov)
+		return ERR_PTR(-ENOMEM);
+
+	if (copy_from_user(iov, io->iov, sizeof(*iov) * io->iovcnt)) {
+		ret = -EFAULT;
+		goto fail;
+	}
+
+	*total = 0;
+	for (i = 0; i < io->iovcnt; i++) {
+		ssize_t iov_len = (ssize_t)iov[i].iov_len;
+
+		if (iov_len > MAX_RW_COUNT - *total) {
+			ret = -EINVAL;
+			goto fail;
+		}
+
+		if (!access_ok(is_send ? VERIFY_READ : VERIFY_WRITE,
+					iov[i].iov_base, iov_len)) {
+			ret = -EFAULT;
+			goto fail;
+		}
+
+		*total += iov_len;
+	}
+
+	return iov;
+
+fail:
+	kfree(iov);
+	return ERR_PTR(ret);
+}
+
+static ssize_t
+vs_devio_send(struct vs_service_device *service, struct iovec *iov,
+		size_t iovcnt, ssize_t to_send, bool nonblocking)
+{
+	struct vs_mbuf *mbuf = NULL;
+	struct vs_devio_priv *priv;
+	unsigned i;
+	ssize_t offset = 0;
+	ssize_t ret;
+	DEFINE_WAIT(wait);
+
+	priv = vs_devio_priv_get_from_service(service);
+	ret = -ENODEV;
+	if (!priv)
+		goto fail_priv_get;
+
+	vs_service_state_lock(service);
+
+	/*
+	 * Wait for send quota before allocating. This wait is open-coded
+	 * because the devio driver has no generated state structure or base
+	 * state to provide one.
+	 */
+	ret = 0;
+	while (!vs_service_send_mbufs_available(service)) {
+		if (nonblocking) {
+			ret = -EAGAIN;
+			break;
+		}
+
+		if (signal_pending(current)) {
+			ret = -ERESTARTSYS;
+			break;
+		}
+
+		prepare_to_wait_exclusive(&service->quota_wq, &wait,
+				TASK_INTERRUPTIBLE);
+
+		vs_service_state_unlock(service);
+		schedule();
+		vs_service_state_lock(service);
+
+		if (priv->reset) {
+			ret = -ECONNRESET;
+			break;
+		}
+
+		if (!priv->running) {
+			ret = -ENOTCONN;
+			break;
+		}
+	}
+	finish_wait(&service->quota_wq, &wait);
+
+	if (ret)
+		goto fail_alloc;
+
+	mbuf = vs_service_alloc_mbuf(service, to_send, GFP_KERNEL);
+	if (IS_ERR(mbuf)) {
+		ret = PTR_ERR(mbuf);
+		goto fail_alloc;
+	}
+
+	/* Ready to send; copy data into the mbuf. */
+	ret = -EFAULT;
+	for (i = 0; i < iovcnt; i++) {
+		if (copy_from_user(mbuf->data + offset, iov[i].iov_base,
+					iov[i].iov_len))
+			goto fail_copy;
+		offset += iov[i].iov_len;
+	}
+	mbuf->size = to_send;
+
+	/* Send the message. */
+	ret = vs_service_send(service, mbuf);
+	if (ret < 0)
+		goto fail_send;
+
+	/* Wake the next waiter, if there's more quota available. */
+	if (waitqueue_active(&service->quota_wq) &&
+			vs_service_send_mbufs_available(service) > 0)
+		wake_up(&service->quota_wq);
+
+	vs_service_state_unlock(service);
+	vs_devio_priv_put(priv);
+
+	return to_send;
+
+fail_send:
+fail_copy:
+	vs_service_free_mbuf(service, mbuf);
+	wake_up(&service->quota_wq);
+fail_alloc:
+	vs_service_state_unlock(service);
+	vs_devio_priv_put(priv);
+fail_priv_get:
+	return ret;
+}
+
+static ssize_t
+vs_devio_recv(struct vs_service_device *service, struct iovec *iov,
+		size_t iovcnt, u32 *notify_bits, ssize_t recv_space,
+		bool nonblocking)
+{
+	struct vs_mbuf *mbuf = NULL;
+	struct vs_devio_priv *priv;
+	unsigned i;
+	ssize_t offset = 0;
+	ssize_t ret;
+	DEFINE_WAIT(wait);
+
+	priv = vs_devio_priv_get_from_service(service);
+	ret = -ENODEV;
+	if (!priv)
+		goto fail_priv_get;
+
+	/* Take the recv_wq lock, which also protects recv_queue. */
+	spin_lock_irq(&priv->recv_wq.lock);
+
+	/* Wait for a message, notification, or reset. */
+	ret = wait_event_interruptible_exclusive_locked_irq(priv->recv_wq,
+			!list_empty(&priv->recv_queue) || priv->reset ||
+			atomic_read(&priv->notify_pending) || nonblocking);
+
+	if (priv->reset)
+		ret = -ECONNRESET; /* Service reset */
+	else if (!ret && list_empty(&priv->recv_queue))
+		ret = -EAGAIN; /* Nonblocking, or notification */
+
+	if (ret < 0) {
+		spin_unlock_irq(&priv->recv_wq.lock);
+		goto no_mbuf;
+	}
+
+	/* Take the first mbuf from the list, and check its size. */
+	mbuf = list_first_entry(&priv->recv_queue, struct vs_mbuf, queue);
+	if (mbuf->size > recv_space) {
+		spin_unlock_irq(&priv->recv_wq.lock);
+		ret = -EMSGSIZE;
+		goto fail_msg_size;
+	}
+	list_del_init(&mbuf->queue);
+
+	spin_unlock_irq(&priv->recv_wq.lock);
+
+	/* Copy to user. */
+	ret = -EFAULT;
+	for (i = 0; (mbuf->size > offset) && (i < iovcnt); i++) {
+		size_t len = min(mbuf->size - offset, iov[i].iov_len);
+		if (copy_to_user(iov[i].iov_base, mbuf->data + offset, len))
+			goto fail_copy;
+		offset += len;
+	}
+	ret = offset;
+
+no_mbuf:
+	/*
+	 * Read and clear the pending notification bits. If any notifications
+	 * are received, don't return an error, even if we failed to receive a
+	 * message.
+	 */
+	*notify_bits = atomic_xchg(&priv->notify_pending, 0);
+	if ((ret < 0) && *notify_bits)
+		ret = 0;
+
+fail_copy:
+	if (mbuf)
+		vs_service_free_mbuf(service, mbuf);
+fail_msg_size:
+	vs_devio_priv_put(priv);
+fail_priv_get:
+	return ret;
+}
+
+static int
+vs_devio_check_perms(struct file *file, unsigned flags)
+{
+	if ((flags & MAY_READ) && !(file->f_mode & FMODE_READ))
+		return -EBADF;
+
+	if ((flags & MAY_WRITE) && !(file->f_mode & FMODE_WRITE))
+		return -EBADF;
+
+	return security_file_permission(file, flags);
+}
+
+static long
+vs_devio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	void __user *ptr = (void __user *)arg;
+	struct vs_service_device *service = file->private_data;
+	struct vs_ioctl_bind bind;
+	struct vs_ioctl_iovec io;
+	u32 flags;
+	long ret;
+	ssize_t iov_total;
+	struct iovec *iov;
+
+	if (!service)
+		return -ENODEV;
+
+	switch (cmd) {
+	case IOCTL_VS_RESET_SERVICE:
+		ret = vs_devio_check_perms(file, MAY_WRITE);
+		if (ret < 0)
+			break;
+		ret = vs_service_reset(service, service);
+		break;
+	case IOCTL_VS_GET_NAME:
+		ret = vs_devio_check_perms(file, MAY_READ);
+		if (ret < 0)
+			break;
+		if (service->name != NULL) {
+			size_t len = strnlen(service->name,
+					_IOC_SIZE(IOCTL_VS_GET_NAME) - 1);
+			if (copy_to_user(ptr, service->name, len + 1))
+				ret = -EFAULT;
+		} else {
+			ret = -EINVAL;
+		}
+		break;
+	case IOCTL_VS_GET_PROTOCOL:
+		ret = vs_devio_check_perms(file, MAY_READ);
+		if (ret < 0)
+			break;
+		if (service->protocol != NULL) {
+			size_t len = strnlen(service->protocol,
+					_IOC_SIZE(IOCTL_VS_GET_PROTOCOL) - 1);
+			if (copy_to_user(ptr, service->protocol, len + 1))
+				ret = -EFAULT;
+		} else {
+			ret = -EINVAL;
+		}
+		break;
+	case IOCTL_VS_BIND_CLIENT:
+		ret = vs_devio_check_perms(file, MAY_EXEC);
+		if (ret < 0)
+			break;
+		ret = vs_devio_bind_client(service, &bind);
+		if (!ret && copy_to_user(ptr, &bind, sizeof(bind)))
+			ret = -EFAULT;
+		break;
+	case IOCTL_VS_BIND_SERVER:
+		ret = vs_devio_check_perms(file, MAY_EXEC);
+		if (ret < 0)
+			break;
+		if (copy_from_user(&bind, ptr, sizeof(bind))) {
+			ret = -EFAULT;
+			break;
+		}
+		ret = vs_devio_bind_server(service, &bind);
+		if (!ret && copy_to_user(ptr, &bind, sizeof(bind)))
+			ret = -EFAULT;
+		break;
+	case IOCTL_VS_NOTIFY:
+		ret = vs_devio_check_perms(file, MAY_WRITE);
+		if (ret < 0)
+			break;
+		if (copy_from_user(&flags, ptr, sizeof(flags))) {
+			ret = -EFAULT;
+			break;
+		}
+		ret = vs_service_notify(service, flags);
+		break;
+	case IOCTL_VS_SEND:
+		ret = vs_devio_check_perms(file, MAY_WRITE);
+		if (ret < 0)
+			break;
+		if (copy_from_user(&io, ptr, sizeof(io))) {
+			ret = -EFAULT;
+			break;
+		}
+
+		iov = vs_devio_check_iov(&io, true, &iov_total);
+		if (IS_ERR(iov)) {
+			ret = PTR_ERR(iov);
+			break;
+		}
+
+		ret = vs_devio_send(service, iov, io.iovcnt, iov_total,
+				file->f_flags & O_NONBLOCK);
+		kfree(iov);
+		break;
+	case IOCTL_VS_RECV:
+		ret = vs_devio_check_perms(file, MAY_READ);
+		if (ret < 0)
+			break;
+		if (copy_from_user(&io, ptr, sizeof(io))) {
+			ret = -EFAULT;
+			break;
+		}
+
+		iov = vs_devio_check_iov(&io, false, &iov_total);
+		if (IS_ERR(iov)) {
+			ret = PTR_ERR(iov);
+			break;
+		}
+
+		ret = vs_devio_recv(service, iov, io.iovcnt,
+			&io.notify_bits, iov_total,
+			file->f_flags & O_NONBLOCK);
+		kfree(iov);
+
+		if (ret >= 0) {
+			u32 __user *notify_bits_ptr = ptr + offsetof(
+					struct vs_ioctl_iovec, notify_bits);
+			if (copy_to_user(notify_bits_ptr, &io.notify_bits,
+					sizeof(io.notify_bits)))
+				ret = -EFAULT;
+		}
+		break;
+	default:
+		dev_dbg(&service->dev, "Unknown ioctl %#x, arg: %lx\n", cmd,
+				arg);
+		ret = -ENOSYS;
+		break;
+	}
+
+	return ret;
+}
+
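+/*
+ * Illustrative userspace sketch (assumptions only): sending one message via
+ * IOCTL_VS_SEND on an already-bound service fd. The field names match the
+ * uses above; the fd and payload are hypothetical.
+ *
+ *	char payload[] = "hello";
+ *	struct iovec iov = { .iov_base = payload, .iov_len = sizeof(payload) };
+ *	struct vs_ioctl_iovec io = { .iovcnt = 1, .iov = &iov };
+ *
+ *	if (ioctl(fd, IOCTL_VS_SEND, &io) < 0)
+ *		perror("IOCTL_VS_SEND");
+ */
+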
+#ifdef CONFIG_COMPAT
+
+struct vs_compat_ioctl_bind {
+	__u32 send_quota;
+	__u32 recv_quota;
+	__u32 send_notify_bits;
+	__u32 recv_notify_bits;
+	compat_size_t msg_size;
+};
+
+#define compat_ioctl_bind_conv(dest, src) ({ \
+	dest.send_quota = src.send_quota;		\
+	dest.recv_quota = src.recv_quota;		\
+	dest.send_notify_bits = src.send_notify_bits;	\
+	dest.recv_notify_bits = src.recv_notify_bits;	\
+	dest.msg_size = (compat_size_t)src.msg_size;	\
+})
+
+#define COMPAT_IOCTL_VS_BIND_CLIENT _IOR('4', 3, struct vs_compat_ioctl_bind)
+#define COMPAT_IOCTL_VS_BIND_SERVER _IOWR('4', 4, struct vs_compat_ioctl_bind)
+
+struct vs_compat_ioctl_iovec {
+	union {
+		__u32 iovcnt; /* input */
+		__u32 notify_bits; /* output (recv only) */
+	};
+	compat_uptr_t iov;
+};
+
+#define COMPAT_IOCTL_VS_SEND \
+    _IOW('4', 6, struct vs_compat_ioctl_iovec)
+#define COMPAT_IOCTL_VS_RECV \
+    _IOWR('4', 7, struct vs_compat_ioctl_iovec)
+
+static struct iovec *
+vs_devio_check_compat_iov(struct vs_compat_ioctl_iovec *c_io,
+	bool is_send, ssize_t *total)
+{
+	struct iovec *iov;
+	struct compat_iovec *c_iov;
+
+	unsigned i;
+	int ret;
+
+	if (c_io->iovcnt > UIO_MAXIOV)
+		return ERR_PTR(-EINVAL);
+
+	c_iov = kzalloc(sizeof(*c_iov) * c_io->iovcnt, GFP_KERNEL);
+	if (!c_iov)
+		return ERR_PTR(-ENOMEM);
+
+	iov = kzalloc(sizeof(*iov) * c_io->iovcnt, GFP_KERNEL);
+	if (!iov) {
+		kfree(c_iov);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	if (copy_from_user(c_iov, (struct compat_iovec __user *)
+		compat_ptr(c_io->iov), sizeof(*c_iov) * c_io->iovcnt)) {
+		ret = -EFAULT;
+		goto fail;
+	}
+
+	*total = 0;
+	for (i = 0; i < c_io->iovcnt; i++) {
+		ssize_t iov_len;
+		iov[i].iov_base = compat_ptr(c_iov[i].iov_base);
+		iov[i].iov_len = (compat_size_t)c_iov[i].iov_len;
+
+		iov_len = (ssize_t)iov[i].iov_len;
+
+		if (iov_len > MAX_RW_COUNT - *total) {
+			ret = -EINVAL;
+			goto fail;
+		}
+
+		if (!access_ok(is_send ? VERIFY_READ : VERIFY_WRITE,
+					iov[i].iov_base, iov_len)) {
+			ret = -EFAULT;
+			goto fail;
+		}
+
+		*total += iov_len;
+	}
+
+	kfree(c_iov);
+	return iov;
+
+fail:
+	kfree(c_iov);
+	kfree(iov);
+	return ERR_PTR(ret);
+}
+
+static long
+vs_devio_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	void __user *ptr = (void __user *)arg;
+	struct vs_service_device *service = file->private_data;
+	struct vs_ioctl_bind bind;
+	struct vs_compat_ioctl_bind compat_bind;
+	struct vs_compat_ioctl_iovec compat_io;
+	long ret;
+	ssize_t iov_total;
+	struct iovec *iov;
+
+	if (!service)
+		return -ENODEV;
+
+	switch (cmd) {
+	case IOCTL_VS_RESET_SERVICE:
+	case IOCTL_VS_GET_NAME:
+	case IOCTL_VS_GET_PROTOCOL:
+		return vs_devio_ioctl(file, cmd, arg);
+	case COMPAT_IOCTL_VS_SEND:
+		ret = vs_devio_check_perms(file, MAY_WRITE);
+		if (ret < 0)
+			break;
+		if (copy_from_user(&compat_io, ptr, sizeof(compat_io))) {
+			ret = -EFAULT;
+			break;
+		}
+
+		iov = vs_devio_check_compat_iov(&compat_io, true, &iov_total);
+		if (IS_ERR(iov)) {
+			ret = PTR_ERR(iov);
+			break;
+		}
+
+		ret = vs_devio_send(service, iov, compat_io.iovcnt, iov_total,
+				file->f_flags & O_NONBLOCK);
+		kfree(iov);
+
+		break;
+	case COMPAT_IOCTL_VS_RECV:
+		ret = vs_devio_check_perms(file, MAY_READ);
+		if (ret < 0)
+			break;
+		if (copy_from_user(&compat_io, ptr, sizeof(compat_io))) {
+			ret = -EFAULT;
+			break;
+		}
+
+		iov = vs_devio_check_compat_iov(&compat_io, false, &iov_total);
+		if (IS_ERR(iov)) {
+			ret = PTR_ERR(iov);
+			break;
+		}
+
+		ret = vs_devio_recv(service, iov, compat_io.iovcnt,
+			&compat_io.notify_bits, iov_total,
+			file->f_flags & O_NONBLOCK);
+		kfree(iov);
+
+		if (ret >= 0) {
+			u32 __user *notify_bits_ptr = ptr + offsetof(
+					struct vs_compat_ioctl_iovec, notify_bits);
+			if (copy_to_user(notify_bits_ptr, &compat_io.notify_bits,
+					sizeof(compat_io.notify_bits)))
+				ret = -EFAULT;
+		}
+		break;
+	case COMPAT_IOCTL_VS_BIND_CLIENT:
+		ret = vs_devio_check_perms(file, MAY_EXEC);
+		if (ret < 0)
+			break;
+		ret = vs_devio_bind_client(service, &bind);
+		compat_ioctl_bind_conv(compat_bind, bind);
+		if (!ret && copy_to_user(ptr, &compat_bind,
+					sizeof(compat_bind)))
+			ret = -EFAULT;
+		break;
+	case COMPAT_IOCTL_VS_BIND_SERVER:
+		ret = vs_devio_check_perms(file, MAY_EXEC);
+		if (ret < 0)
+			break;
+		if (copy_from_user(&compat_bind, ptr, sizeof(compat_bind))) {
+			ret = -EFAULT;
+			break;
+		}
+		compat_ioctl_bind_conv(bind, compat_bind);
+		ret = vs_devio_bind_server(service, &bind);
+		compat_ioctl_bind_conv(compat_bind, bind);
+		if (!ret && copy_to_user(ptr, &compat_bind,
+					sizeof(compat_bind)))
+			ret = -EFAULT;
+		break;
+	default:
+		dev_dbg(&service->dev, "Unknown ioctl %#x, arg: %lx\n", cmd,
+				arg);
+		ret = -ENOSYS;
+		break;
+	}
+
+	return ret;
+}
+
+#endif /* CONFIG_COMPAT */
+
+static unsigned int
+vs_devio_poll(struct file *file, struct poll_table_struct *wait)
+{
+	struct vs_service_device *service = file->private_data;
+	struct vs_devio_priv *priv = vs_devio_priv_get_from_service(service);
+	unsigned int flags = 0;
+
+	poll_wait(file, &service->quota_wq, wait);
+
+	if (priv) {
+		/*
+		 * Note: there is no way for us to ensure that all poll
+		 * waiters on a given waitqueue have gone away, other than to
+		 * actually close the file. So, this poll_wait() is only safe
+		 * if we never release our claim on the service before the
+		 * file is closed.
+		 *
+		 * We try to guarantee this by only unbinding the devio driver
+		 * on close, and setting suppress_bind_attrs in the driver so
+		 * root can't unbind us with sysfs.
+		 */
+		poll_wait(file, &priv->recv_wq, wait);
+
+		if (priv->reset) {
+			/* Service reset; raise poll error. */
+			flags |= POLLERR | POLLHUP;
+		} else if (priv->running) {
+			if (!list_empty_careful(&priv->recv_queue))
+				flags |= POLLRDNORM | POLLIN;
+			if (atomic_read(&priv->notify_pending))
+				flags |= POLLRDNORM | POLLIN;
+			if (vs_service_send_mbufs_available(service) > 0)
+				flags |= POLLWRNORM | POLLOUT;
+		}
+
+		vs_devio_priv_put(priv);
+	} else {
+		/* No driver attached. Return error flags. */
+		flags |= POLLERR | POLLHUP;
+	}
+
+	return flags;
+}
+
+static const struct file_operations vs_fops = {
+	.owner		= THIS_MODULE,
+	.open		= vs_devio_open,
+	.release	= vs_devio_release,
+	.unlocked_ioctl	= vs_devio_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= vs_devio_compat_ioctl,
+#endif
+	.poll		= vs_devio_poll,
+};
+
+int vservices_cdev_major;
+static struct cdev vs_cdev;
+
+int __init
+vs_devio_init(void)
+{
+	dev_t dev;
+	int r;
+
+	r = alloc_chrdev_region(&dev, 0, VSERVICES_DEVICE_MAX,
+			"vs_service");
+	if (r < 0)
+		goto fail_alloc_chrdev;
+	vservices_cdev_major = MAJOR(dev);
+
+	cdev_init(&vs_cdev, &vs_fops);
+	r = cdev_add(&vs_cdev, dev, VSERVICES_DEVICE_MAX);
+	if (r < 0)
+		goto fail_cdev_add;
+
+	return 0;
+
+fail_cdev_add:
+	unregister_chrdev_region(dev, VSERVICES_DEVICE_MAX);
+fail_alloc_chrdev:
+	return r;
+}
+
+void __exit
+vs_devio_exit(void)
+{
+	cdev_del(&vs_cdev);
+	unregister_chrdev_region(MKDEV(vservices_cdev_major, 0),
+			VSERVICES_DEVICE_MAX);
+}
diff --git a/drivers/vservices/protocol/Kconfig b/drivers/vservices/protocol/Kconfig
new file mode 100644
index 0000000..e0f2798c
--- /dev/null
+++ b/drivers/vservices/protocol/Kconfig
@@ -0,0 +1,44 @@
+#
+# vServices protocol drivers configuration
+#
+
+if VSERVICES_SERVER || VSERVICES_CLIENT
+
+menu "Protocol drivers"
+config VSERVICES_PROTOCOL_BLOCK
+	bool
+
+config VSERVICES_PROTOCOL_BLOCK_SERVER
+	tristate "Block server protocol"
+	depends on VSERVICES_SUPPORT && VSERVICES_SERVER
+	select VSERVICES_PROTOCOL_BLOCK
+	help
+	  This option adds support for the Virtual Services block protocol server.
+
+config VSERVICES_PROTOCOL_BLOCK_CLIENT
+	tristate "Block client protocol"
+	depends on VSERVICES_SUPPORT && VSERVICES_CLIENT
+	select VSERVICES_PROTOCOL_BLOCK
+	help
+	  This option adds support for the Virtual Services block protocol client.
+
+config VSERVICES_PROTOCOL_SERIAL
+	bool
+
+config VSERVICES_PROTOCOL_SERIAL_SERVER
+	tristate "Serial server protocol"
+	depends on VSERVICES_SUPPORT && VSERVICES_SERVER
+	select VSERVICES_PROTOCOL_SERIAL
+	help
+	  This option adds support for the Virtual Services serial protocol server.
+
+config VSERVICES_PROTOCOL_SERIAL_CLIENT
+	tristate "Serial client protocol"
+	depends on VSERVICES_SUPPORT && VSERVICES_CLIENT
+	select VSERVICES_PROTOCOL_SERIAL
+	help
+	  This option adds support for the Virtual Services serial protocol client.
+
+endmenu
+
+endif # VSERVICES_SERVER || VSERVICES_CLIENT
diff --git a/drivers/vservices/protocol/Makefile b/drivers/vservices/protocol/Makefile
new file mode 100644
index 0000000..0c714e0
--- /dev/null
+++ b/drivers/vservices/protocol/Makefile
@@ -0,0 +1,5 @@
+# This is an autogenerated Makefile for vservice-linux-stacks
+
+obj-$(CONFIG_VSERVICES_SUPPORT) += core/
+obj-$(CONFIG_VSERVICES_PROTOCOL_BLOCK) += block/
+obj-$(CONFIG_VSERVICES_PROTOCOL_SERIAL) += serial/
diff --git a/drivers/vservices/protocol/block/Makefile b/drivers/vservices/protocol/block/Makefile
new file mode 100644
index 0000000..325b57e
--- /dev/null
+++ b/drivers/vservices/protocol/block/Makefile
@@ -0,0 +1,7 @@
+ccflags-y += -Werror
+
+obj-$(CONFIG_VSERVICES_PROTOCOL_BLOCK_SERVER)	+= vservices_protocol_block_server.o
+vservices_protocol_block_server-objs = server.o
+
+obj-$(CONFIG_VSERVICES_PROTOCOL_BLOCK_CLIENT)	+= vservices_protocol_block_client.o
+vservices_protocol_block_client-objs = client.o
diff --git a/drivers/vservices/protocol/block/client.c b/drivers/vservices/protocol/block/client.c
new file mode 100644
index 0000000..702a30a8
--- /dev/null
+++ b/drivers/vservices/protocol/block/client.c
@@ -0,0 +1,1186 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+ /*
+  * This is the generated code for the block client protocol handling.
+  */
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
+#include <linux/export.h>
+#endif
+
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/protocol/block/types.h>
+#include <vservices/protocol/block/common.h>
+#include <vservices/protocol/block/client.h>
+#include <vservices/service.h>
+
+#include "../../transport.h"
+
+#define VS_MBUF_SIZE(mbuf) ((mbuf)->size)
+#define VS_MBUF_DATA(mbuf) ((mbuf)->data)
+#define VS_STATE_SERVICE_PTR(state) ((state)->service)
+
+static int _vs_client_block_req_open(struct vs_client_block_state *_state);
+
+/*** Linux driver model integration ***/
+struct vs_block_client_driver {
+	struct vs_client_block *client;
+	struct list_head list;
+	struct vs_service_driver vsdrv;
+};
+
+#define to_client_driver(d) \
+        container_of(d, struct vs_block_client_driver, vsdrv)
+
+static void reset_nack_requests(struct vs_service_device *service)
+{
+
+	struct vs_client_block_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_block *client __maybe_unused =
+	    to_client_driver(vsdrv)->client;
+
+	int i __maybe_unused;
+
+	/* Clear out pending read commands */
+	for_each_set_bit(i, state->state.io.read_bitmask,
+			 VSERVICE_BLOCK_IO_READ_MAX_PENDING) {
+		void *tag = state->state.io.read_tags[i];
+
+		if (client->io.nack_read)
+			client->io.nack_read(state, tag,
+					     VSERVICE_BLOCK_SERVICE_RESET);
+
+		__clear_bit(i, state->state.io.read_bitmask);
+	}
+
+	/* Clear out pending  write commands */
+	for_each_set_bit(i, state->state.io.write_bitmask,
+			 VSERVICE_BLOCK_IO_WRITE_MAX_PENDING) {
+		void *tag = state->state.io.write_tags[i];
+
+		if (client->io.nack_write)
+			client->io.nack_write(state, tag,
+					      VSERVICE_BLOCK_SERVICE_RESET);
+
+		__clear_bit(i, state->state.io.write_bitmask);
+	}
+
+}
+
+static void block_handle_start(struct vs_service_device *service)
+{
+
+	struct vs_client_block_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_block *client __maybe_unused =
+	    to_client_driver(vsdrv)->client;
+
+	vs_service_state_lock(service);
+	state->state = VSERVICE_BLOCK_RESET_STATE;
+
+	_vs_client_block_req_open(state);
+
+	vs_service_state_unlock(service);
+}
+
+static void block_handle_reset(struct vs_service_device *service)
+{
+
+	struct vs_client_block_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_block *client __maybe_unused =
+	    to_client_driver(vsdrv)->client;
+
+	vs_service_state_lock(service);
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base)) {
+		vs_service_state_unlock(service);
+		return;
+	}
+	state->state.base = VSERVICE_BASE_RESET_STATE;
+	reset_nack_requests(service);
+	if (client->closed)
+		client->closed(state);
+
+	state->state = VSERVICE_BLOCK_RESET_STATE;
+
+	vs_service_state_unlock(service);
+}
+
+static void block_handle_start_bh(struct vs_service_device *service)
+{
+
+	struct vs_client_block_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_block *client __maybe_unused =
+	    to_client_driver(vsdrv)->client;
+
+	vs_service_state_lock_bh(service);
+	state->state = VSERVICE_BLOCK_RESET_STATE;
+
+	_vs_client_block_req_open(state);
+
+	vs_service_state_unlock_bh(service);
+}
+
+static void block_handle_reset_bh(struct vs_service_device *service)
+{
+
+	struct vs_client_block_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_block *client __maybe_unused =
+	    to_client_driver(vsdrv)->client;
+
+	vs_service_state_lock_bh(service);
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base)) {
+		vs_service_state_unlock_bh(service);
+		return;
+	}
+	state->state.base = VSERVICE_BASE_RESET_STATE;
+	reset_nack_requests(service);
+	if (client->closed)
+		client->closed(state);
+
+	state->state = VSERVICE_BLOCK_RESET_STATE;
+
+	vs_service_state_unlock_bh(service);
+}
+
+static int block_client_probe(struct vs_service_device *service);
+static int block_client_remove(struct vs_service_device *service);
+static int block_handle_message(struct vs_service_device *service,
+				struct vs_mbuf *_mbuf);
+static void block_handle_notify(struct vs_service_device *service,
+				uint32_t flags);
+static void block_handle_start(struct vs_service_device *service);
+static void block_handle_start_bh(struct vs_service_device *service);
+static void block_handle_reset(struct vs_service_device *service);
+static void block_handle_reset_bh(struct vs_service_device *service);
+static int block_handle_tx_ready(struct vs_service_device *service);
+
+int __vservice_block_client_register(struct vs_client_block *client,
+				     const char *name, struct module *owner)
+{
+	int ret;
+	struct vs_block_client_driver *driver;
+
+	if (client->tx_atomic && !client->rx_atomic)
+		return -EINVAL;
+
+	driver = kzalloc(sizeof(*driver), GFP_KERNEL);
+	if (!driver) {
+		ret = -ENOMEM;
+		goto fail_alloc_driver;
+	}
+
+	client->driver = &driver->vsdrv;
+	driver->client = client;
+
+	driver->vsdrv.protocol = VSERVICE_BLOCK_PROTOCOL_NAME;
+
+	driver->vsdrv.is_server = false;
+	driver->vsdrv.rx_atomic = client->rx_atomic;
+	driver->vsdrv.tx_atomic = client->tx_atomic;
+
+	driver->vsdrv.probe = block_client_probe;
+	driver->vsdrv.remove = block_client_remove;
+	driver->vsdrv.receive = block_handle_message;
+	driver->vsdrv.notify = block_handle_notify;
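+	/*
+	 * tx_atomic clients are driven from atomic context, so they get the
+	 * _bh variants of the start and reset handlers, which use
+	 * vs_service_state_lock_bh()/vs_service_state_unlock_bh() rather
+	 * than vs_service_state_lock().
+	 */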
+	driver->vsdrv.start = client->tx_atomic ?
+	    block_handle_start_bh : block_handle_start;
+	driver->vsdrv.reset = client->tx_atomic ?
+	    block_handle_reset_bh : block_handle_reset;
+	driver->vsdrv.tx_ready = block_handle_tx_ready;
+	driver->vsdrv.out_notify_count = 0;
+	driver->vsdrv.in_notify_count = 0;
+	driver->vsdrv.driver.name = name;
+	driver->vsdrv.driver.owner = owner;
+	driver->vsdrv.driver.bus = &vs_client_bus_type;
+
+	ret = driver_register(&driver->vsdrv.driver);
+
+	if (ret) {
+		goto fail_driver_register;
+	}
+
+	return 0;
+
+ fail_driver_register:
+	client->driver = NULL;
+	kfree(driver);
+ fail_alloc_driver:
+	return ret;
+}
+
+EXPORT_SYMBOL(__vservice_block_client_register);
+
+int vservice_block_client_unregister(struct vs_client_block *client)
+{
+	struct vs_block_client_driver *driver;
+
+	if (!client->driver)
+		return 0;
+
+	driver = to_client_driver(client->driver);
+	driver_unregister(&driver->vsdrv.driver);
+
+	client->driver = NULL;
+	kfree(driver);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vservice_block_client_unregister);
+
+static int block_client_probe(struct vs_service_device *service)
+{
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_block *client = to_client_driver(vsdrv)->client;
+	struct vs_client_block_state *state;
+
+	state = client->alloc(service);
+	if (!state)
+		return -ENOMEM;
+	else if (IS_ERR(state))
+		return PTR_ERR(state);
+
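+	/*
+	 * The client's alloc() callback supplies the state structure. A
+	 * reference on the service is taken here and dropped again in
+	 * block_client_remove().
+	 */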
+	state->service = vs_get_service(service);
+	state->state = VSERVICE_BLOCK_RESET_STATE;
+
+	dev_set_drvdata(&service->dev, state);
+
+	return 0;
+}
+
+static int block_client_remove(struct vs_service_device *service)
+{
+	struct vs_client_block_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_block *client = to_client_driver(vsdrv)->client;
+
+	state->released = true;
+	dev_set_drvdata(&service->dev, NULL);
+	client->release(state);
+
+	vs_put_service(service);
+
+	return 0;
+}
+
+static int block_handle_tx_ready(struct vs_service_device *service)
+{
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_block *client = to_client_driver(vsdrv)->client;
+	struct vs_client_block_state *state = dev_get_drvdata(&service->dev);
+
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base))
+		return 0;
+
+	if (client->tx_ready)
+		client->tx_ready(state);
+
+	return 0;
+}
+
+static int _vs_client_block_req_open(struct vs_client_block_state *_state)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_client_block *_client =
+	    to_client_driver(vsdrv)->client;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_CLOSED:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  vs_service_has_atomic_rx(VS_STATE_SERVICE_PTR
+							   (_state)) ?
+				  GFP_ATOMIC : GFP_KERNEL);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_BLOCK_BASE_REQ_OPEN;
+
+	_state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED__OPEN;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(_vs_client_block_req_open);
+static int _vs_client_block_req_close(struct vs_client_block_state *_state)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_client_block *_client =
+	    to_client_driver(vsdrv)->client;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  vs_service_has_atomic_rx(VS_STATE_SERVICE_PTR
+							   (_state)) ?
+				  GFP_ATOMIC : GFP_KERNEL);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_BLOCK_BASE_REQ_CLOSE;
+
+	_state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING__CLOSE;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(_vs_client_block_req_close);
+static int _vs_client_block_req_reopen(struct vs_client_block_state *_state)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_client_block *_client =
+	    to_client_driver(vsdrv)->client;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  vs_service_has_atomic_rx(VS_STATE_SERVICE_PTR
+							   (_state)) ?
+				  GFP_ATOMIC : GFP_KERNEL);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_BLOCK_BASE_REQ_REOPEN;
+
+	_state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING__REOPEN;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(_vs_client_block_req_reopen);
+static int
+block_base_handle_ack_open(const struct vs_client_block *_client,
+			   struct vs_client_block_state *_state,
+			   struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 28UL;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_CLOSED__OPEN:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+	_state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING;
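+	/*
+	 * Decode the 28-byte ack_open payload: readonly (+0), sector_size
+	 * (+4), segment_size (+8), device_sectors (+12), flushable (+20)
+	 * and committable (+24), all offsets relative to the end of the
+	 * message ID.
+	 */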
+	_state->io.sector_size =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+	_state->io.segment_size =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 8UL);
+	_state->readonly =
+	    *(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	_state->sector_size =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+	_state->segment_size =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 8UL);
+	_state->device_sectors =
+	    *(uint64_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+			   12UL);
+	_state->flushable =
+	    *(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 20UL);
+	_state->committable =
+	    *(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 24UL);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	_client->opened(_state);
+	return 0;
+
+}
+
+static int
+block_base_handle_nack_open(const struct vs_client_block *_client,
+			    struct vs_client_block_state *_state,
+			    struct vs_mbuf *_mbuf)
+{
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_CLOSED__OPEN:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+	_state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED;
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	dev_err(&VS_STATE_SERVICE_PTR(_state)->dev,
+		"Open operation failed for device %s\n",
+		VS_STATE_SERVICE_PTR(_state)->name);
+
+	return 0;
+
+}
+
+EXPORT_SYMBOL(block_base_handle_ack_open);
+static int
+block_base_handle_ack_close(const struct vs_client_block *_client,
+			    struct vs_client_block_state *_state,
+			    struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING__CLOSE:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+	_state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED;
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	wake_up_all(&_state->service->quota_wq);
+	_client->closed(_state);
+	return 0;
+
+}
+
+static int
+block_base_handle_nack_close(const struct vs_client_block *_client,
+			     struct vs_client_block_state *_state,
+			     struct vs_mbuf *_mbuf)
+{
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING__CLOSE:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+	_state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING;
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	wake_up_all(&_state->service->quota_wq);
+	_client->closed(_state);
+	return 0;
+
+}
+
+EXPORT_SYMBOL(block_base_handle_ack_close);
+static int
+block_base_handle_ack_reopen(const struct vs_client_block *_client,
+			     struct vs_client_block_state *_state,
+			     struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING__REOPEN:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+	_state->state.base.statenum = VSERVICE_BASE__RESET;
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
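+	/*
+	 * If the client supplies a reopened() callback it handles the
+	 * reopen itself; otherwise fall back to reporting closed() and
+	 * immediately re-sending req_open.
+	 */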
+	if (_client->reopened) {
+		_client->reopened(_state);
+		return 0;
+	}
+	wake_up_all(&_state->service->quota_wq);
+	_client->closed(_state);
+	return _vs_client_block_req_open(_state);
+
+}
+
+static int
+block_base_handle_nack_reopen(const struct vs_client_block *_client,
+			      struct vs_client_block_state *_state,
+			      struct vs_mbuf *_mbuf)
+{
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING__REOPEN:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	return 0;
+
+}
+
+EXPORT_SYMBOL(block_base_handle_ack_reopen);
+int vs_client_block_io_getbufs_ack_read(struct vs_client_block_state *_state,
+					struct vs_pbuf *data,
+					struct vs_mbuf *_mbuf)
+{
+	const vs_message_id_t _msg_id = VSERVICE_BLOCK_IO_ACK_READ;
+	const size_t _max_size =
+	    sizeof(vs_message_id_t) + _state->io.segment_size + 8UL;
+	const size_t _min_size = _max_size - _state->io.segment_size;
+	size_t _exact_size;
+
+	if (*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) != _msg_id)
+		return -EINVAL;
+	if ((VS_MBUF_SIZE(_mbuf) > _max_size)
+	    || (VS_MBUF_SIZE(_mbuf) < _min_size))
+		return -EBADMSG;
+
+	data->size =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+	data->data =
+	    (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL +
+			   sizeof(uint32_t));
+	data->max_size = data->size;
+
+	/* Now check that the size received is exactly the size expected */
+	_exact_size = _max_size - (_state->io.segment_size - data->size);
+	if (VS_MBUF_SIZE(_mbuf) != _exact_size)
+		return -EBADMSG;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_block_io_getbufs_ack_read);
+int vs_client_block_io_free_ack_read(struct vs_client_block_state *_state,
+				     struct vs_pbuf *data,
+				     struct vs_mbuf *_mbuf)
+{
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_block_io_free_ack_read);
+struct vs_mbuf *vs_client_block_io_alloc_req_write(struct vs_client_block_state
+						   *_state,
+						   struct vs_pbuf *data,
+						   gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+	const vs_message_id_t _msg_id = VSERVICE_BLOCK_IO_REQ_WRITE;
+	const uint32_t _msg_size =
+	    sizeof(vs_message_id_t) + _state->io.segment_size + 32UL;
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return _mbuf;
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+		return ERR_PTR(-ENOMEM);
+	}
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) = _msg_id;
+
+	if (!data)
+		goto fail;
+	data->data =
+	    (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+			   28UL + sizeof(uint32_t));
+	data->size = _state->io.segment_size;
+	data->max_size = data->size;
+	return _mbuf;
+
+ fail:
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	return NULL;
+}
+
+EXPORT_SYMBOL(vs_client_block_io_alloc_req_write);
+int vs_client_block_io_free_req_write(struct vs_client_block_state *_state,
+				      struct vs_pbuf *data,
+				      struct vs_mbuf *_mbuf)
+{
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_block_io_free_req_write);
+int
+vs_client_block_io_req_read(struct vs_client_block_state *_state, void *_opaque,
+			    uint64_t sector_index, uint32_t num_sects,
+			    bool nodelay, bool flush, gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 24UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_client_block *_client =
+	    to_client_driver(vsdrv)->client;
+	uint32_t _opaque_tmp;
+	if (_state->state.base.statenum != VSERVICE_BASE_STATE_RUNNING)
+		return -EPROTO;
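+	/*
+	 * Allocate a free slot in the pending-read bitmask. The slot index
+	 * is what travels on the wire as the request's opaque field; the
+	 * caller's tag is stored in read_tags[] once the send succeeds.
+	 */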
+	_opaque_tmp =
+	    find_first_zero_bit(_state->state.io.read_bitmask,
+				VSERVICE_BLOCK_IO_READ_MAX_PENDING);
+	if (_opaque_tmp >= VSERVICE_BLOCK_IO_READ_MAX_PENDING)
+		return -EPROTO;
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) = VSERVICE_BLOCK_IO_REQ_READ;
+
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+	    _opaque_tmp;
+	*(uint64_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL) =
+	    sector_index;
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 12UL) =
+	    num_sects;
+	*(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 16UL) =
+	    nodelay;
+	*(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 20UL) =
+	    flush;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.io.read_tags[_opaque_tmp] = _opaque;
+	__set_bit(_opaque_tmp, _state->state.io.read_bitmask);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_block_io_req_read);
+int
+vs_client_block_io_req_write(struct vs_client_block_state *_state,
+			     void *_opaque, uint64_t sector_index,
+			     uint32_t num_sects, bool nodelay, bool flush,
+			     bool commit, struct vs_pbuf data,
+			     struct vs_mbuf *_mbuf)
+{
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_client_block *_client =
+	    to_client_driver(vsdrv)->client;
+	uint32_t _opaque_tmp;
+	if (_state->state.base.statenum != VSERVICE_BASE_STATE_RUNNING)
+		return -EPROTO;
+	_opaque_tmp =
+	    find_first_zero_bit(_state->state.io.write_bitmask,
+				VSERVICE_BLOCK_IO_WRITE_MAX_PENDING);
+	if (_opaque_tmp >= VSERVICE_BLOCK_IO_WRITE_MAX_PENDING)
+		return -EPROTO;
+
+	if (*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) !=
+	    VSERVICE_BLOCK_IO_REQ_WRITE)
+
+		return -EINVAL;
+
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+	    _opaque_tmp;
+	*(uint64_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL) =
+	    sector_index;
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 12UL) =
+	    num_sects;
+	*(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 16UL) =
+	    nodelay;
+	*(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 20UL) =
+	    flush;
+	*(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 24UL) =
+	    commit;
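+	/*
+	 * Reject payloads that do not fit in the mbuf, then shrink the mbuf
+	 * to the actual payload length and record that length at offset 28
+	 * before sending.
+	 */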
+	if ((data.size + sizeof(vs_message_id_t) + 28UL) > VS_MBUF_SIZE(_mbuf))
+		return -EINVAL;
+
+	if (data.size < data.max_size)
+		VS_MBUF_SIZE(_mbuf) -= (data.max_size - data.size);
+
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 28UL) =
+	    data.size;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.io.write_tags[_opaque_tmp] = _opaque;
+	__set_bit(_opaque_tmp, _state->state.io.write_bitmask);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_block_io_req_write);
+static int
+block_io_handle_ack_read(const struct vs_client_block *_client,
+			 struct vs_client_block_state *_state,
+			 struct vs_mbuf *_mbuf)
+{
+	const size_t _max_size =
+	    sizeof(vs_message_id_t) + _state->io.segment_size + 8UL;
+	void *_opaque;
+	struct vs_pbuf data;
+	const size_t _min_size = _max_size - _state->io.segment_size;
+	size_t _exact_size;
+	uint32_t _opaque_tmp;
+
+	/* The first check is to ensure the message isn't complete garbage */
+	if ((VS_MBUF_SIZE(_mbuf) > _max_size)
+	    || (VS_MBUF_SIZE(_mbuf) < _min_size))
+		return -EBADMSG;
+	_opaque_tmp =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	if (_opaque_tmp >= VSERVICE_BLOCK_IO_READ_MAX_PENDING)
+		return -EPROTO;
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(_state->state.base))
+		return -EPROTO;
+	if (!test_bit(_opaque_tmp, _state->state.io.read_bitmask))
+		return -EPROTO;
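+	/*
+	 * Recover the tag stored when the read was submitted and release
+	 * the pending-read slot before decoding the payload.
+	 */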
+	_opaque = _state->state.io.read_tags[_opaque_tmp];
+	__clear_bit(_opaque_tmp, _state->state.io.read_bitmask);
+
+	data.size =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+	data.data =
+	    (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL +
+			   sizeof(uint32_t));
+	data.max_size = data.size;
+
+	/* Now check that the size received is exactly the size expected */
+	_exact_size = _max_size - (_state->io.segment_size - data.size);
+	if (VS_MBUF_SIZE(_mbuf) != _exact_size)
+		return -EBADMSG;
+	if (_client->io.ack_read)
+		return _client->io.ack_read(_state, _opaque, data, _mbuf);
+	return 0;
+}
+
+static int
+block_io_handle_nack_read(const struct vs_client_block *_client,
+			  struct vs_client_block_state *_state,
+			  struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 8UL;
+	void *_opaque;
+	vservice_block_block_io_error_t err;
+	uint32_t _opaque_tmp;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	_opaque_tmp =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	if (_opaque_tmp >= VSERVICE_BLOCK_IO_READ_MAX_PENDING)
+		return -EPROTO;
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(_state->state.base))
+		return -EPROTO;
+	if (!test_bit(_opaque_tmp, _state->state.io.read_bitmask))
+		return -EPROTO;
+	_opaque = _state->state.io.read_tags[_opaque_tmp];
+	__clear_bit(_opaque_tmp, _state->state.io.read_bitmask);
+	err =
+	    *(vservice_block_block_io_error_t *) (VS_MBUF_DATA(_mbuf) +
+						  sizeof(vs_message_id_t) +
+						  4UL);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_client->io.nack_read)
+		return _client->io.nack_read(_state, _opaque, err);
+	return 0;
+}
+
+EXPORT_SYMBOL(block_io_handle_ack_read);
+static int
+block_io_handle_ack_write(const struct vs_client_block *_client,
+			  struct vs_client_block_state *_state,
+			  struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 4UL;
+	void *_opaque;
+	uint32_t _opaque_tmp;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	_opaque_tmp =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	if (_opaque_tmp >= VSERVICE_BLOCK_IO_WRITE_MAX_PENDING)
+		return -EPROTO;
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(_state->state.base))
+		return -EPROTO;
+	if (!test_bit(_opaque_tmp, _state->state.io.write_bitmask))
+		return -EPROTO;
+	_opaque = _state->state.io.write_tags[_opaque_tmp];
+	__clear_bit(_opaque_tmp, _state->state.io.write_bitmask);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_client->io.ack_write)
+		return _client->io.ack_write(_state, _opaque);
+	return 0;
+}
+
+static int
+block_io_handle_nack_write(const struct vs_client_block *_client,
+			   struct vs_client_block_state *_state,
+			   struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 8UL;
+	void *_opaque;
+	vservice_block_block_io_error_t err;
+	uint32_t _opaque_tmp;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	_opaque_tmp =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	if (_opaque_tmp >= VSERVICE_BLOCK_IO_WRITE_MAX_PENDING)
+		return -EPROTO;
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(_state->state.base))
+		return -EPROTO;
+	if (!test_bit(_opaque_tmp, _state->state.io.write_bitmask))
+		return -EPROTO;
+	_opaque = _state->state.io.write_tags[_opaque_tmp];
+	__clear_bit(_opaque_tmp, _state->state.io.write_bitmask);
+	err =
+	    *(vservice_block_block_io_error_t *) (VS_MBUF_DATA(_mbuf) +
+						  sizeof(vs_message_id_t) +
+						  4UL);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_client->io.nack_write)
+		return _client->io.nack_write(_state, _opaque, err);
+	return 0;
+}
+
+EXPORT_SYMBOL(block_io_handle_ack_write);
+static int
+block_handle_message(struct vs_service_device *service, struct vs_mbuf *_mbuf)
+{
+	vs_message_id_t message_id;
+	__maybe_unused struct vs_client_block_state *state =
+	    dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	__maybe_unused struct vs_client_block *client =
+	    to_client_driver(vsdrv)->client;
+
+	int ret;
+
+	/* Extract the message ID */
+	if (VS_MBUF_SIZE(_mbuf) < sizeof(message_id)) {
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Invalid message size %zd\n",
+			__func__, __LINE__, VS_MBUF_SIZE(_mbuf));
+
+		return -EBADMSG;
+	}
+
+	message_id = *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf));
+
+	switch (message_id) {
+
+/** interface base **/
+/* command in sync open */
+	case VSERVICE_BLOCK_BASE_ACK_OPEN:
+		ret = block_base_handle_ack_open(client, state, _mbuf);
+		break;
+	case VSERVICE_BLOCK_BASE_NACK_OPEN:
+		ret = block_base_handle_nack_open(client, state, _mbuf);
+		break;
+
+/* command in sync close */
+	case VSERVICE_BLOCK_BASE_ACK_CLOSE:
+		ret = block_base_handle_ack_close(client, state, _mbuf);
+		break;
+	case VSERVICE_BLOCK_BASE_NACK_CLOSE:
+		ret = block_base_handle_nack_close(client, state, _mbuf);
+		break;
+
+/* command in sync reopen */
+	case VSERVICE_BLOCK_BASE_ACK_REOPEN:
+		ret = block_base_handle_ack_reopen(client, state, _mbuf);
+		break;
+	case VSERVICE_BLOCK_BASE_NACK_REOPEN:
+		ret = block_base_handle_nack_reopen(client, state, _mbuf);
+		break;
+
+/** interface block_io **/
+/* command in parallel read */
+	case VSERVICE_BLOCK_IO_ACK_READ:
+		ret = block_io_handle_ack_read(client, state, _mbuf);
+		break;
+	case VSERVICE_BLOCK_IO_NACK_READ:
+		ret = block_io_handle_nack_read(client, state, _mbuf);
+		break;
+
+/* command in parallel write */
+	case VSERVICE_BLOCK_IO_ACK_WRITE:
+		ret = block_io_handle_ack_write(client, state, _mbuf);
+		break;
+	case VSERVICE_BLOCK_IO_NACK_WRITE:
+		ret = block_io_handle_nack_write(client, state, _mbuf);
+		break;
+
+	default:
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Unknown message type %d\n",
+			__func__, __LINE__, (int)message_id);
+
+		ret = -EPROTO;
+		break;
+	}
+
+	if (ret) {
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Handler for message type %d returned %d\n",
+			__func__, __LINE__, (int)message_id, ret);
+
+	}
+
+	return ret;
+}
+
+static void block_handle_notify(struct vs_service_device *service,
+				uint32_t notify_bits)
+{
+	__maybe_unused struct vs_client_block_state *state =
+	    dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	__maybe_unused struct vs_client_block *client =
+	    to_client_driver(vsdrv)->client;
+
+	uint32_t bits = notify_bits;
+	int ret;
+
+	while (bits) {
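+	/*
+	 * The block protocol defines no notifications in this direction, so
+	 * any set bit falls through to the protocol-error path below.
+	 */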
+		uint32_t not = __ffs(bits);
+		switch (not) {
+
+    /** interface block_io **/
+
+		default:
+			dev_err(&state->service->dev,
+				"[%s:%d] Protocol error: Unknown notification %d\n",
+				__func__, __LINE__, (int)not);
+
+			ret = -EPROTO;
+			break;
+
+		}
+		bits &= ~(1 << not);
+		if (ret) {
+			dev_err(&state->service->dev,
+				"[%s:%d] Protocol error: Handler for notification %d returned %d\n",
+				__func__, __LINE__, (int)not, ret);
+
+		}
+	}
+}
+
+int vs_client_block_reopen(struct vs_client_block_state *_state)
+{
+	return _vs_client_block_req_reopen(_state);
+}
+
+EXPORT_SYMBOL(vs_client_block_reopen);
+
+int vs_client_block_close(struct vs_client_block_state *_state)
+{
+	return _vs_client_block_req_close(_state);
+}
+
+EXPORT_SYMBOL(vs_client_block_close);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services blockClient Protocol Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/protocol/block/server.c b/drivers/vservices/protocol/block/server.c
new file mode 100644
index 0000000..a4a7d1a
--- /dev/null
+++ b/drivers/vservices/protocol/block/server.c
@@ -0,0 +1,1371 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+ /*
+  * This is the generated code for the block server protocol handling.
+  */
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
+#include <linux/export.h>
+#endif
+
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/protocol/block/types.h>
+#include <vservices/protocol/block/common.h>
+#include <vservices/protocol/block/server.h>
+#include <vservices/service.h>
+
+#include "../../transport.h"
+
+#define VS_MBUF_SIZE(mbuf) mbuf->size
+#define VS_MBUF_DATA(mbuf) mbuf->data
+#define VS_STATE_SERVICE_PTR(state) state->service
+
+/*** Linux driver model integration ***/
+struct vs_block_server_driver {
+	struct vs_server_block *server;
+	struct list_head list;
+	struct vs_service_driver vsdrv;
+};
+
+#define to_server_driver(d) \
+        container_of(d, struct vs_block_server_driver, vsdrv)
+
+static void reset_nack_requests(struct vs_service_device *service)
+{
+
+}
+
+static void block_handle_start(struct vs_service_device *service)
+{
+
+	struct vs_server_block_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_block *server __maybe_unused =
+	    to_server_driver(vsdrv)->server;
+
+	vs_service_state_lock(service);
+	state->state = VSERVICE_BLOCK_RESET_STATE;
+
+	vs_service_state_unlock(service);
+}
+
+static void block_handle_reset(struct vs_service_device *service)
+{
+
+	struct vs_server_block_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_block *server __maybe_unused =
+	    to_server_driver(vsdrv)->server;
+
+	vs_service_state_lock(service);
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base)) {
+		vs_service_state_unlock(service);
+		return;
+	}
+	state->state.base = VSERVICE_BASE_RESET_STATE;
+	reset_nack_requests(service);
+	if (server->closed)
+		server->closed(state);
+
+	state->state = VSERVICE_BLOCK_RESET_STATE;
+
+	vs_service_state_unlock(service);
+}
+
+static void block_handle_start_bh(struct vs_service_device *service)
+{
+
+	struct vs_server_block_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_block *server __maybe_unused =
+	    to_server_driver(vsdrv)->server;
+
+	vs_service_state_lock_bh(service);
+	state->state = VSERVICE_BLOCK_RESET_STATE;
+
+	vs_service_state_unlock_bh(service);
+}
+
+static void block_handle_reset_bh(struct vs_service_device *service)
+{
+
+	struct vs_server_block_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_block *server __maybe_unused =
+	    to_server_driver(vsdrv)->server;
+
+	vs_service_state_lock_bh(service);
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base)) {
+		vs_service_state_unlock_bh(service);
+		return;
+	}
+	state->state.base = VSERVICE_BASE_RESET_STATE;
+	reset_nack_requests(service);
+	if (server->closed)
+		server->closed(state);
+
+	state->state = VSERVICE_BLOCK_RESET_STATE;
+
+	vs_service_state_unlock_bh(service);
+}
+
+static int block_server_probe(struct vs_service_device *service);
+static int block_server_remove(struct vs_service_device *service);
+static int block_handle_message(struct vs_service_device *service,
+				struct vs_mbuf *_mbuf);
+static void block_handle_notify(struct vs_service_device *service,
+				uint32_t flags);
+static void block_handle_start(struct vs_service_device *service);
+static void block_handle_start_bh(struct vs_service_device *service);
+static void block_handle_reset(struct vs_service_device *service);
+static void block_handle_reset_bh(struct vs_service_device *service);
+static int block_handle_tx_ready(struct vs_service_device *service);
+
+int __vservice_block_server_register(struct vs_server_block *server,
+				     const char *name, struct module *owner)
+{
+	int ret;
+	struct vs_block_server_driver *driver;
+
+	if (server->tx_atomic && !server->rx_atomic)
+		return -EINVAL;
+
+	driver = kzalloc(sizeof(*driver), GFP_KERNEL);
+	if (!driver) {
+		ret = -ENOMEM;
+		goto fail_alloc_driver;
+	}
+
+	server->driver = &driver->vsdrv;
+	driver->server = server;
+
+	driver->vsdrv.protocol = VSERVICE_BLOCK_PROTOCOL_NAME;
+
+	driver->vsdrv.is_server = true;
+	driver->vsdrv.rx_atomic = server->rx_atomic;
+	driver->vsdrv.tx_atomic = server->tx_atomic;
+	/* FIXME Jira ticket SDK-2835 - philipd. */
+	driver->vsdrv.in_quota_min = 1;
+	driver->vsdrv.in_quota_best = server->in_quota_best ?
+	    server->in_quota_best : driver->vsdrv.in_quota_min;
+	/* FIXME Jira ticket SDK-2835 - philipd. */
+	driver->vsdrv.out_quota_min = 1;
+	driver->vsdrv.out_quota_best = server->out_quota_best ?
+	    server->out_quota_best : driver->vsdrv.out_quota_min;
+	driver->vsdrv.in_notify_count = VSERVICE_BLOCK_NBIT_IN__COUNT;
+	driver->vsdrv.out_notify_count = VSERVICE_BLOCK_NBIT_OUT__COUNT;
+
+	driver->vsdrv.probe = block_server_probe;
+	driver->vsdrv.remove = block_server_remove;
+	driver->vsdrv.receive = block_handle_message;
+	driver->vsdrv.notify = block_handle_notify;
+	driver->vsdrv.start = server->tx_atomic ?
+	    block_handle_start_bh : block_handle_start;
+	driver->vsdrv.reset = server->tx_atomic ?
+	    block_handle_reset_bh : block_handle_reset;
+	driver->vsdrv.tx_ready = block_handle_tx_ready;
+	driver->vsdrv.out_notify_count = 0;
+	driver->vsdrv.in_notify_count = 0;
+	driver->vsdrv.driver.name = name;
+	driver->vsdrv.driver.owner = owner;
+	driver->vsdrv.driver.bus = &vs_server_bus_type;
+
+	ret = driver_register(&driver->vsdrv.driver);
+
+	if (ret) {
+		goto fail_driver_register;
+	}
+
+	return 0;
+
+ fail_driver_register:
+	server->driver = NULL;
+	kfree(driver);
+ fail_alloc_driver:
+	return ret;
+}
+
+EXPORT_SYMBOL(__vservice_block_server_register);
+
+int vservice_block_server_unregister(struct vs_server_block *server)
+{
+	struct vs_block_server_driver *driver;
+
+	if (!server->driver)
+		return 0;
+
+	driver = to_server_driver(server->driver);
+	driver_unregister(&driver->vsdrv.driver);
+
+	server->driver = NULL;
+	kfree(driver);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vservice_block_server_unregister);
+
+static int block_server_probe(struct vs_service_device *service)
+{
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_block *server = to_server_driver(vsdrv)->server;
+	struct vs_server_block_state *state;
+
+	state = server->alloc(service);
+	if (!state)
+		return -ENOMEM;
+	else if (IS_ERR(state))
+		return PTR_ERR(state);
+
+	state->service = vs_get_service(service);
+	state->state = VSERVICE_BLOCK_RESET_STATE;
+
+	dev_set_drvdata(&service->dev, state);
+
+	return 0;
+}
+
+static int block_server_remove(struct vs_service_device *service)
+{
+	struct vs_server_block_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_block *server = to_server_driver(vsdrv)->server;
+
+	state->released = true;
+	dev_set_drvdata(&service->dev, NULL);
+	server->release(state);
+
+	vs_put_service(service);
+
+	return 0;
+}
+
+static int block_handle_tx_ready(struct vs_service_device *service)
+{
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_block *server = to_server_driver(vsdrv)->server;
+	struct vs_server_block_state *state = dev_get_drvdata(&service->dev);
+
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base))
+		return 0;
+
+	if (server->tx_ready)
+		server->tx_ready(state);
+
+	return 0;
+}
+
+static int
+vs_server_block_send_ack_open(struct vs_server_block_state *_state, gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 28UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_block *_server =
+	    to_server_driver(vsdrv)->server;
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_BLOCK_BASE_ACK_OPEN;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_CLOSED__OPEN:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
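+	/*
+	 * Encode the 28-byte ack_open payload (readonly, sector_size,
+	 * segment_size, device_sectors, flushable, committable) and latch
+	 * the advertised sector/segment sizes into the io sub-state before
+	 * sending.
+	 */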
+	*(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+	    _state->readonly;
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL) =
+	    _state->sector_size;
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 8UL) =
+	    _state->segment_size;
+	*(uint64_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 12UL) =
+	    _state->device_sectors;
+	*(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 20UL) =
+	    _state->flushable;
+	*(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 24UL) =
+	    _state->committable;
+	_state->io.sector_size = _state->sector_size;
+	_state->io.segment_size = _state->segment_size;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_send_ack_open);
+static int
+vs_server_block_send_nack_open(struct vs_server_block_state *_state,
+			       gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_block *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_CLOSED__OPEN:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_BLOCK_BASE_NACK_OPEN;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_send_nack_open);
+static int
+vs_server_block_send_ack_close(struct vs_server_block_state *_state,
+			       gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_block *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING__CLOSE:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_BLOCK_BASE_ACK_CLOSE;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_send_ack_close);
+static int
+vs_server_block_send_nack_close(struct vs_server_block_state *_state,
+				gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_block *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING__CLOSE:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_BLOCK_BASE_NACK_CLOSE;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_send_nack_close);
+static int
+vs_server_block_send_ack_reopen(struct vs_server_block_state *_state,
+				gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_block *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING__REOPEN:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_BLOCK_BASE_ACK_REOPEN;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.base.statenum = VSERVICE_BASE__RESET;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_send_ack_reopen);
+static int
+vs_server_block_send_nack_reopen(struct vs_server_block_state *_state,
+				 gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_block *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING__REOPEN:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_BLOCK_BASE_NACK_REOPEN;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_send_nack_reopen);
+static int
+vs_server_block_handle_req_open(const struct vs_server_block *_server,
+				struct vs_server_block_state *_state,
+				struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_CLOSED:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+	_state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED__OPEN;
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
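+	/*
+	 * The server's open() callback (if any) returns a
+	 * vs_server_response_type_t, which open_complete() maps onto
+	 * ack_open or nack_open; with no callback the open is acked
+	 * unconditionally.
+	 */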
+	if (_server->open)
+		return vs_server_block_open_complete(_state,
+						     _server->open(_state));
+	return vs_server_block_open_complete(_state, VS_SERVER_RESP_SUCCESS);
+
+}
+
+int vs_server_block_open_complete(struct vs_server_block_state *_state,
+				  vs_server_response_type_t resp)
+{
+	int ret = 0;
+	if (resp == VS_SERVER_RESP_SUCCESS)
+		ret =
+		    vs_server_block_send_ack_open(_state,
+						  vs_service_has_atomic_rx
+						  (VS_STATE_SERVICE_PTR(_state))
+						  ? GFP_ATOMIC : GFP_KERNEL);
+	else if (resp == VS_SERVER_RESP_FAILURE)
+		ret =
+		    vs_server_block_send_nack_open(_state,
+						   vs_service_has_atomic_rx
+						   (VS_STATE_SERVICE_PTR
+						    (_state)) ? GFP_ATOMIC :
+						   GFP_KERNEL);
+
+	return ret;
+
+}
+
+EXPORT_SYMBOL(vs_server_block_open_complete);
+
+EXPORT_SYMBOL(vs_server_block_handle_req_open);
+static int
+vs_server_block_handle_req_close(const struct vs_server_block *_server,
+				 struct vs_server_block_state *_state,
+				 struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+	_state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING__CLOSE;
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_server->close)
+		return vs_server_block_close_complete(_state,
+						      _server->close(_state));
+	return vs_server_block_close_complete(_state, VS_SERVER_RESP_SUCCESS);
+
+}
+
+int vs_server_block_close_complete(struct vs_server_block_state *_state,
+				   vs_server_response_type_t resp)
+{
+	int ret = 0;
+	if (resp == VS_SERVER_RESP_SUCCESS)
+		ret =
+		    vs_server_block_send_ack_close(_state,
+						   vs_service_has_atomic_rx
+						   (VS_STATE_SERVICE_PTR
+						    (_state)) ? GFP_ATOMIC :
+						   GFP_KERNEL);
+	else if (resp == VS_SERVER_RESP_FAILURE)
+		ret =
+		    vs_server_block_send_nack_close(_state,
+						    vs_service_has_atomic_rx
+						    (VS_STATE_SERVICE_PTR
+						     (_state)) ? GFP_ATOMIC :
+						    GFP_KERNEL);
+	if ((resp == VS_SERVER_RESP_SUCCESS) && (ret == 0)) {
+		wake_up_all(&_state->service->quota_wq);
+	}
+	return ret;
+
+}
+
+EXPORT_SYMBOL(vs_server_block_close_complete);
+
+EXPORT_SYMBOL(vs_server_block_handle_req_close);
+static int
+vs_server_block_handle_req_reopen(const struct vs_server_block *_server,
+				  struct vs_server_block_state *_state,
+				  struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+	_state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING__REOPEN;
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_server->reopen)
+		return vs_server_block_reopen_complete(_state,
+						       _server->reopen(_state));
+	else
+		return vs_server_block_send_nack_reopen(_state,
+							vs_service_has_atomic_rx
+							(VS_STATE_SERVICE_PTR
+							 (_state)) ? GFP_ATOMIC
+							: GFP_KERNEL);
+
+}
+
+int vs_server_block_reopen_complete(struct vs_server_block_state *_state,
+				    vs_server_response_type_t resp)
+{
+	int ret = 0;
+	if (resp == VS_SERVER_RESP_SUCCESS) {
+		_state->io.sector_size = _state->sector_size;
+		_state->io.segment_size = _state->segment_size;
+		ret =
+		    vs_server_block_send_ack_reopen(_state,
+						    vs_service_has_atomic_rx
+						    (VS_STATE_SERVICE_PTR
+						     (_state)) ? GFP_ATOMIC :
+						    GFP_KERNEL);
+	} else if (resp == VS_SERVER_RESP_FAILURE) {
+		ret =
+		    vs_server_block_send_nack_reopen(_state,
+						     vs_service_has_atomic_rx
+						     (VS_STATE_SERVICE_PTR
+						      (_state)) ? GFP_ATOMIC :
+						     GFP_KERNEL);
+	}
+
+	return ret;
+
+}
+
+EXPORT_SYMBOL(vs_server_block_reopen_complete);
+
+EXPORT_SYMBOL(vs_server_block_handle_req_reopen);
+struct vs_mbuf *vs_server_block_io_alloc_ack_read(struct vs_server_block_state
+						  *_state, struct vs_pbuf *data,
+						  gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+	const vs_message_id_t _msg_id = VSERVICE_BLOCK_IO_ACK_READ;
+	const uint32_t _msg_size =
+	    sizeof(vs_message_id_t) + _state->io.segment_size + 8UL;
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return _mbuf;
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+		return ERR_PTR(-ENOMEM);
+	}
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) = _msg_id;
+
+	if (!data)
+		goto fail;
+	data->data =
+	    (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL +
+			   sizeof(uint32_t));
+	data->size = _state->io.segment_size;
+	data->max_size = data->size;
+	return _mbuf;
+
+ fail:
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	return NULL;
+}
+
+EXPORT_SYMBOL(vs_server_block_io_alloc_ack_read);
+int vs_server_block_io_free_ack_read(struct vs_server_block_state *_state,
+				     struct vs_pbuf *data,
+				     struct vs_mbuf *_mbuf)
+{
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_io_free_ack_read);
+int vs_server_block_io_getbufs_req_write(struct vs_server_block_state *_state,
+					 struct vs_pbuf *data,
+					 struct vs_mbuf *_mbuf)
+{
+	const vs_message_id_t _msg_id = VSERVICE_BLOCK_IO_REQ_WRITE;
+	const size_t _max_size =
+	    sizeof(vs_message_id_t) + _state->io.segment_size + 32UL;
+	const size_t _min_size = _max_size - _state->io.segment_size;
+	size_t _exact_size;
+
+	if (*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) != _msg_id)
+		return -EINVAL;
+	if ((VS_MBUF_SIZE(_mbuf) > _max_size)
+	    || (VS_MBUF_SIZE(_mbuf) < _min_size))
+		return -EBADMSG;
+
+	data->size =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+			   28UL);
+	data->data =
+	    (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+			   28UL + sizeof(uint32_t));
+	data->max_size = data->size;
+
+	/* Now check that the size received is exactly the size expected */
+	_exact_size = _max_size - (_state->io.segment_size - data->size);
+	if (VS_MBUF_SIZE(_mbuf) != _exact_size)
+		return -EBADMSG;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_io_getbufs_req_write);
+int vs_server_block_io_free_req_write(struct vs_server_block_state *_state,
+				      struct vs_pbuf *data,
+				      struct vs_mbuf *_mbuf)
+{
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_io_free_req_write);
+int
+vs_server_block_io_send_ack_read(struct vs_server_block_state *_state,
+				 uint32_t _opaque, struct vs_pbuf data,
+				 struct vs_mbuf *_mbuf)
+{
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_block *_server =
+	    to_server_driver(vsdrv)->server;
+
+	if (_opaque >= VSERVICE_BLOCK_IO_READ_MAX_PENDING)
+		return -EPROTO;
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(_state->state.base))
+		return -EPROTO;
+	if (!test_bit(_opaque, _state->state.io.read_bitmask))
+		return -EPROTO;
+	if (*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) !=
+	    VSERVICE_BLOCK_IO_ACK_READ)
+
+		return -EINVAL;
+
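+	/*
+	 * Echo the client's opaque slot at offset 0, trim the mbuf to the
+	 * actual payload length, then release the pending-read slot once
+	 * the ack has been sent.
+	 */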
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+	    _opaque;
+	if ((data.size + sizeof(vs_message_id_t) + 4UL) > VS_MBUF_SIZE(_mbuf))
+		return -EINVAL;
+
+	if (data.size < data.max_size)
+		VS_MBUF_SIZE(_mbuf) -= (data.max_size - data.size);
+
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL) =
+	    data.size;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	__clear_bit(_opaque, _state->state.io.read_bitmask);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_io_send_ack_read);
+int
+vs_server_block_io_send_nack_read(struct vs_server_block_state *_state,
+				  uint32_t _opaque,
+				  vservice_block_block_io_error_t err,
+				  gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 8UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_block *_server =
+	    to_server_driver(vsdrv)->server;
+
+	if (_opaque >= VSERVICE_BLOCK_IO_READ_MAX_PENDING)
+		return -EPROTO;
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(_state->state.base))
+		return -EPROTO;
+	if (!test_bit(_opaque, _state->state.io.read_bitmask))
+		return -EPROTO;
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_BLOCK_IO_NACK_READ;
+
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+	    _opaque;
+	*(vservice_block_block_io_error_t *) (VS_MBUF_DATA(_mbuf) +
+					      sizeof(vs_message_id_t) + 4UL) =
+	    err;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	__clear_bit(_opaque, _state->state.io.read_bitmask);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_io_send_nack_read);
+int
+vs_server_block_io_send_ack_write(struct vs_server_block_state *_state,
+				  uint32_t _opaque, gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 4UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_block *_server =
+	    to_server_driver(vsdrv)->server;
+
+	if (_opaque >= VSERVICE_BLOCK_IO_WRITE_MAX_PENDING)
+		return -EPROTO;
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(_state->state.base))
+		return -EPROTO;
+	if (!test_bit(_opaque, _state->state.io.write_bitmask))
+		return -EPROTO;
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_BLOCK_IO_ACK_WRITE;
+
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+	    _opaque;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	__clear_bit(_opaque, _state->state.io.write_bitmask);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_io_send_ack_write);
+int
+vs_server_block_io_send_nack_write(struct vs_server_block_state *_state,
+				   uint32_t _opaque,
+				   vservice_block_block_io_error_t err,
+				   gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 8UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_block *_server =
+	    to_server_driver(vsdrv)->server;
+
+	if (_opaque >= VSERVICE_BLOCK_IO_WRITE_MAX_PENDING)
+		return -EPROTO;
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(_state->state.base))
+		return -EPROTO;
+	if (!test_bit(_opaque, _state->state.io.write_bitmask))
+		return -EPROTO;
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_BLOCK_IO_NACK_WRITE;
+
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+	    _opaque;
+	*(vservice_block_block_io_error_t *) (VS_MBUF_DATA(_mbuf) +
+					      sizeof(vs_message_id_t) + 4UL) =
+	    err;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	__clear_bit(_opaque, _state->state.io.write_bitmask);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_io_send_nack_write);
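+/*
+ * Wire layout of an io.read request as decoded below (offsets are relative
+ * to the end of the vs_message_id_t header): opaque tag (u32) at 0, sector
+ * index (u64) at 4, sector count (u32) at 12, nodelay (bool) at 16 and
+ * flush (bool) at 20.
+ */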
+static int
+vs_server_block_io_handle_req_read(const struct vs_server_block *_server,
+				   struct vs_server_block_state *_state,
+				   struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 24UL;
+	uint32_t _opaque;
+	uint64_t sector_index;
+	uint32_t num_sects;
+	bool nodelay;
+	bool flush;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	_opaque =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	if (_state->state.base.statenum != VSERVICE_BASE_STATE_RUNNING)
+		return -EPROTO;
+	if (test_bit(_opaque, _state->state.io.read_bitmask))
+		return -EPROTO;
+	__set_bit(_opaque, _state->state.io.read_bitmask);
+	_opaque =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	sector_index =
+	    *(uint64_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+	num_sects =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+			   12UL);
+	nodelay =
+	    *(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 16UL);
+	flush =
+	    *(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 20UL);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_server->io.req_read)
+		return _server->io.req_read(_state, _opaque, sector_index,
+					    num_sects, nodelay, flush);
+	else
+		dev_warn(&_state->service->dev,
+			 "[%s:%d] Protocol warning: No handler registered for _server->io.req_read, command will never be acknowledged\n",
+			 __func__, __LINE__);
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_io_handle_req_read);
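+/*
+ * Wire layout of an io.write request as decoded below (offsets are relative
+ * to the end of the vs_message_id_t header): opaque tag (u32) at 0, sector
+ * index (u64) at 4, sector count (u32) at 12, nodelay (bool) at 16, flush
+ * (bool) at 20, commit (bool) at 24, payload length (u32) at 28, followed
+ * by the payload bytes themselves.
+ */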
+static int
+vs_server_block_io_handle_req_write(const struct vs_server_block *_server,
+				    struct vs_server_block_state *_state,
+				    struct vs_mbuf *_mbuf)
+{
+	const size_t _max_size =
+	    sizeof(vs_message_id_t) + _state->io.segment_size + 32UL;
+	uint32_t _opaque;
+	uint64_t sector_index;
+	uint32_t num_sects;
+	bool nodelay;
+	bool flush;
+	bool commit;
+	struct vs_pbuf data;
+	const size_t _min_size = _max_size - _state->io.segment_size;
+	size_t _exact_size;
+
+	/* The first check is to ensure the message isn't complete garbage */
+	if ((VS_MBUF_SIZE(_mbuf) > _max_size)
+	    || (VS_MBUF_SIZE(_mbuf) < _min_size))
+		return -EBADMSG;
+	_opaque =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	if (_state->state.base.statenum != VSERVICE_BASE_STATE_RUNNING)
+		return -EPROTO;
+	if (test_bit(_opaque, _state->state.io.write_bitmask))
+		return -EPROTO;
+	__set_bit(_opaque, _state->state.io.write_bitmask);
+	_opaque =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	sector_index =
+	    *(uint64_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+	num_sects =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+			   12UL);
+	nodelay =
+	    *(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 16UL);
+	flush =
+	    *(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 20UL);
+	commit =
+	    *(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 24UL);
+	data.size =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+			   28UL);
+	data.data =
+	    (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+			   28UL + sizeof(uint32_t));
+	data.max_size = data.size;
+
+	/* Now check the size received is the exact size expected */
+	_exact_size = _max_size - (_state->io.segment_size - data.size);
+	if (VS_MBUF_SIZE(_mbuf) != _exact_size)
+		return -EBADMSG;
+	if (_server->io.req_write)
+		return _server->io.req_write(_state, _opaque, sector_index,
+					     num_sects, nodelay, flush, commit,
+					     data, _mbuf);
+	else
+		dev_warn(&_state->service->dev,
+			 "[%s:%d] Protocol warning: No handler registered for _server->io.req_write, command will never be acknowledged\n",
+			 __func__, __LINE__);
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_io_handle_req_write);
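+/*
+ * block_handle_message() is the message dispatcher installed as this
+ * service's receive callback: it reads the leading vs_message_id_t and
+ * hands the mbuf to the per-message handlers above, reporting a protocol
+ * error for any message type it does not recognise.
+ */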
+static int
+block_handle_message(struct vs_service_device *service, struct vs_mbuf *_mbuf)
+{
+	vs_message_id_t message_id;
+	__maybe_unused struct vs_server_block_state *state =
+	    dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	__maybe_unused struct vs_server_block *server =
+	    to_server_driver(vsdrv)->server;
+
+	int ret;
+
+	/* Extract the message ID */
+	if (VS_MBUF_SIZE(_mbuf) < sizeof(message_id)) {
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Invalid message size %zd\n",
+			__func__, __LINE__, VS_MBUF_SIZE(_mbuf));
+
+		return -EBADMSG;
+	}
+
+	message_id = *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf));
+
+	switch (message_id) {
+
+/** interface base **/
+/* command in sync open */
+	case VSERVICE_BLOCK_BASE_REQ_OPEN:
+		ret = vs_server_block_handle_req_open(server, state, _mbuf);
+		break;
+
+/* command in sync close */
+	case VSERVICE_BLOCK_BASE_REQ_CLOSE:
+		ret = vs_server_block_handle_req_close(server, state, _mbuf);
+		break;
+
+/* command in sync reopen */
+	case VSERVICE_BLOCK_BASE_REQ_REOPEN:
+		ret = vs_server_block_handle_req_reopen(server, state, _mbuf);
+		break;
+
+/** interface block_io **/
+/* command in parallel read */
+	case VSERVICE_BLOCK_IO_REQ_READ:
+		ret = vs_server_block_io_handle_req_read(server, state, _mbuf);
+		break;
+
+/* command in parallel write */
+	case VSERVICE_BLOCK_IO_REQ_WRITE:
+		ret = vs_server_block_io_handle_req_write(server, state, _mbuf);
+		break;
+
+	default:
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Unknown message type %d\n",
+			__func__, __LINE__, (int)message_id);
+
+		ret = -EPROTO;
+		break;
+	}
+
+	if (ret) {
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Handler for message type %d returned %d\n",
+			__func__, __LINE__, (int)message_id, ret);
+
+	}
+
+	return ret;
+}
+
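+/*
+ * The block protocol defines no incoming notification bits, so the notify
+ * handler below simply walks notify_bits with __ffs() and flags any set
+ * bit as a protocol error.
+ */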
+static void block_handle_notify(struct vs_service_device *service,
+				uint32_t notify_bits)
+{
+	__maybe_unused struct vs_server_block_state *state =
+	    dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	__maybe_unused struct vs_server_block *server =
+	    to_server_driver(vsdrv)->server;
+
+	uint32_t bits = notify_bits;
+	int ret;
+
+	while (bits) {
+		uint32_t not = __ffs(bits);
+		switch (not) {
+
+    /** interface block_io **/
+
+		default:
+			dev_err(&state->service->dev,
+				"[%s:%d] Protocol error: Unknown notification %d\n",
+				__func__, __LINE__, (int)not);
+
+			ret = -EPROTO;
+			break;
+
+		}
+		bits &= ~(1 << not);
+		if (ret) {
+			dev_err(&state->service->dev,
+				"[%s:%d] Protocol error: Handler for notification %d returned %d\n",
+				__func__, __LINE__, (int)not, ret);
+
+		}
+	}
+}
+
+MODULE_DESCRIPTION("OKL4 Virtual Services blockServer Protocol Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/protocol/core/Makefile b/drivers/vservices/protocol/core/Makefile
new file mode 100644
index 0000000..6bef7f5
--- /dev/null
+++ b/drivers/vservices/protocol/core/Makefile
@@ -0,0 +1,7 @@
+ccflags-y += -Werror
+
+obj-$(CONFIG_VSERVICES_SERVER) += vservices_protocol_core_server.o
+vservices_protocol_core_server-objs = server.o
+
+obj-$(CONFIG_VSERVICES_CLIENT) += vservices_protocol_core_client.o
+vservices_protocol_core_client-objs = client.o
diff --git a/drivers/vservices/protocol/core/client.c b/drivers/vservices/protocol/core/client.c
new file mode 100644
index 0000000..2dd2136
--- /dev/null
+++ b/drivers/vservices/protocol/core/client.c
@@ -0,0 +1,1069 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+ /*
+  * This is the generated code for the core client protocol handling.
+  */
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
+#include <linux/export.h>
+#endif
+
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/protocol/core/types.h>
+#include <vservices/protocol/core/common.h>
+#include <vservices/protocol/core/client.h>
+#include <vservices/service.h>
+
+#include "../../transport.h"
+
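+/* Local shorthands for the mbuf payload and size, and for the service
+ * device backing a protocol state. */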
+#define VS_MBUF_SIZE(mbuf) mbuf->size
+#define VS_MBUF_DATA(mbuf) mbuf->data
+#define VS_STATE_SERVICE_PTR(state) state->service
+
+/*** Linux driver model integration ***/
+struct vs_core_client_driver {
+	struct vs_client_core *client;
+	struct list_head list;
+	struct vs_service_driver vsdrv;
+};
+
+#define to_client_driver(d) \
+        container_of(d, struct vs_core_client_driver, vsdrv)
+
+static void core_handle_start(struct vs_service_device *service)
+{
+
+	struct vs_client_core_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_core *client __maybe_unused =
+	    to_client_driver(vsdrv)->client;
+
+	vs_service_state_lock(service);
+	state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+	if (client->start)
+		client->start(state);
+	vs_service_state_unlock(service);
+}
+
+static void core_handle_reset(struct vs_service_device *service)
+{
+
+	struct vs_client_core_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_core *client __maybe_unused =
+	    to_client_driver(vsdrv)->client;
+
+	vs_service_state_lock(service);
+	state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+	if (client->reset)
+		client->reset(state);
+	vs_service_state_unlock(service);
+}
+
+static void core_handle_start_bh(struct vs_service_device *service)
+{
+
+	struct vs_client_core_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_core *client __maybe_unused =
+	    to_client_driver(vsdrv)->client;
+
+	vs_service_state_lock_bh(service);
+	state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+	if (client->start)
+		client->start(state);
+	vs_service_state_unlock_bh(service);
+}
+
+static void core_handle_reset_bh(struct vs_service_device *service)
+{
+
+	struct vs_client_core_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_core *client __maybe_unused =
+	    to_client_driver(vsdrv)->client;
+
+	vs_service_state_lock_bh(service);
+	state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+	if (client->reset)
+		client->reset(state);
+	vs_service_state_unlock_bh(service);
+}
+
+static int core_client_probe(struct vs_service_device *service);
+static int core_client_remove(struct vs_service_device *service);
+static int core_handle_message(struct vs_service_device *service,
+			       struct vs_mbuf *_mbuf);
+static void core_handle_notify(struct vs_service_device *service,
+			       uint32_t flags);
+static void core_handle_start(struct vs_service_device *service);
+static void core_handle_start_bh(struct vs_service_device *service);
+static void core_handle_reset(struct vs_service_device *service);
+static void core_handle_reset_bh(struct vs_service_device *service);
+static int core_handle_tx_ready(struct vs_service_device *service);
+
+int __vservice_core_client_register(struct vs_client_core *client,
+				    const char *name, struct module *owner)
+{
+	int ret;
+	struct vs_core_client_driver *driver;
+
+	if (client->tx_atomic && !client->rx_atomic)
+		return -EINVAL;
+
+	driver = kzalloc(sizeof(*driver), GFP_KERNEL);
+	if (!driver) {
+		ret = -ENOMEM;
+		goto fail_alloc_driver;
+	}
+
+	client->driver = &driver->vsdrv;
+	driver->client = client;
+
+	driver->vsdrv.protocol = VSERVICE_CORE_PROTOCOL_NAME;
+
+	driver->vsdrv.is_server = false;
+	driver->vsdrv.rx_atomic = client->rx_atomic;
+	driver->vsdrv.tx_atomic = client->tx_atomic;
+
+	driver->vsdrv.probe = core_client_probe;
+	driver->vsdrv.remove = core_client_remove;
+	driver->vsdrv.receive = core_handle_message;
+	driver->vsdrv.notify = core_handle_notify;
+	driver->vsdrv.start = client->tx_atomic ?
+	    core_handle_start_bh : core_handle_start;
+	driver->vsdrv.reset = client->tx_atomic ?
+	    core_handle_reset_bh : core_handle_reset;
+	driver->vsdrv.tx_ready = core_handle_tx_ready;
+	driver->vsdrv.out_notify_count = 0;
+	driver->vsdrv.in_notify_count = 0;
+	driver->vsdrv.driver.name = name;
+	driver->vsdrv.driver.owner = owner;
+	driver->vsdrv.driver.bus = &vs_client_bus_type;
+
+	ret = driver_register(&driver->vsdrv.driver);
+
+	if (ret) {
+		goto fail_driver_register;
+	}
+
+	return 0;
+
+ fail_driver_register:
+	client->driver = NULL;
+	kfree(driver);
+ fail_alloc_driver:
+	return ret;
+}
+
+EXPORT_SYMBOL(__vservice_core_client_register);
+
+int vservice_core_client_unregister(struct vs_client_core *client)
+{
+	struct vs_core_client_driver *driver;
+
+	if (!client->driver)
+		return 0;
+
+	driver = to_client_driver(client->driver);
+	driver_unregister(&driver->vsdrv.driver);
+
+	client->driver = NULL;
+	kfree(driver);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vservice_core_client_unregister);
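+/*
+ * Typical usage (a minimal sketch, not part of the generated code): a core
+ * client driver fills in a struct vs_client_core with at least .alloc and
+ * .release plus whichever core.* callbacks it needs, then registers it with
+ * the exported function above, e.g.:
+ *
+ *	static struct vs_client_core my_core_client = {
+ *		.alloc   = my_alloc,
+ *		.release = my_release,
+ *	};
+ *	...
+ *	ret = __vservice_core_client_register(&my_core_client,
+ *					      "my_core_client", THIS_MODULE);
+ *
+ * The my_* names are illustrative only; unregistration is the matching
+ * vservice_core_client_unregister() call.
+ */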
+
+static int core_client_probe(struct vs_service_device *service)
+{
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_core *client = to_client_driver(vsdrv)->client;
+	struct vs_client_core_state *state;
+
+	state = client->alloc(service);
+	if (!state)
+		return -ENOMEM;
+	else if (IS_ERR(state))
+		return PTR_ERR(state);
+
+	state->service = vs_get_service(service);
+	state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+
+	dev_set_drvdata(&service->dev, state);
+
+	return 0;
+}
+
+static int core_client_remove(struct vs_service_device *service)
+{
+	struct vs_client_core_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_core *client = to_client_driver(vsdrv)->client;
+
+	state->released = true;
+	dev_set_drvdata(&service->dev, NULL);
+	client->release(state);
+
+	vs_put_service(service);
+
+	return 0;
+}
+
+static int core_handle_tx_ready(struct vs_service_device *service)
+{
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_core *client = to_client_driver(vsdrv)->client;
+	struct vs_client_core_state *state = dev_get_drvdata(&service->dev);
+
+	if (client->tx_ready)
+		client->tx_ready(state);
+
+	return 0;
+}
+
+int vs_client_core_core_getbufs_service_created(struct vs_client_core_state
+						*_state,
+						struct vs_string *service_name,
+						struct vs_string *protocol_name,
+						struct vs_mbuf *_mbuf)
+{
+	const vs_message_id_t _msg_id = VSERVICE_CORE_CORE_MSG_SERVICE_CREATED;
+	const size_t _max_size =
+	    sizeof(vs_message_id_t) + VSERVICE_CORE_SERVICE_NAME_SIZE +
+	    VSERVICE_CORE_PROTOCOL_NAME_SIZE + 4UL;
+	const size_t _min_size = _max_size - VSERVICE_CORE_PROTOCOL_NAME_SIZE;
+	size_t _exact_size;
+
+	if (*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) != _msg_id)
+		return -EINVAL;
+	if ((VS_MBUF_SIZE(_mbuf) > _max_size)
+	    || (VS_MBUF_SIZE(_mbuf) < _min_size))
+		return -EBADMSG;
+
+	service_name->ptr =
+	    (char *)(VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+	service_name->max_size = VSERVICE_CORE_SERVICE_NAME_SIZE;
+
+	protocol_name->ptr =
+	    (char *)(VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+		     VSERVICE_CORE_SERVICE_NAME_SIZE + 4UL);
+	protocol_name->max_size =
+	    VS_MBUF_SIZE(_mbuf) - (sizeof(vs_message_id_t) +
+				   VSERVICE_CORE_SERVICE_NAME_SIZE + 4UL);
+
+	/* Now check the size received is the exact size expected */
+	_exact_size =
+	    _max_size - (VSERVICE_CORE_PROTOCOL_NAME_SIZE -
+			 protocol_name->max_size);
+	if (VS_MBUF_SIZE(_mbuf) != _exact_size)
+		return -EBADMSG;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_getbufs_service_created);
+int vs_client_core_core_free_service_created(struct vs_client_core_state
+					     *_state,
+					     struct vs_string *service_name,
+					     struct vs_string *protocol_name,
+					     struct vs_mbuf *_mbuf)
+{
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_free_service_created);
+int
+vs_client_core_core_req_connect(struct vs_client_core_state *_state,
+				gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_client_core *_client =
+	    to_client_driver(vsdrv)->client;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_DISCONNECTED:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_CORE_CORE_REQ_CONNECT;
+
+	_state->state.core.statenum = VSERVICE_CORE_STATE_DISCONNECTED__CONNECT;
+
+	if (_client->core.state_change)
+		_client->core.state_change(_state,
+					   VSERVICE_CORE_STATE_DISCONNECTED,
+					   VSERVICE_CORE_STATE_DISCONNECTED__CONNECT);
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_req_connect);
+int
+vs_client_core_core_req_disconnect(struct vs_client_core_state *_state,
+				   gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_client_core *_client =
+	    to_client_driver(vsdrv)->client;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_CORE_CORE_REQ_DISCONNECT;
+
+	_state->state.core.statenum = VSERVICE_CORE_STATE_CONNECTED__DISCONNECT;
+
+	if (_client->core.state_change)
+		_client->core.state_change(_state,
+					   VSERVICE_CORE_STATE_CONNECTED,
+					   VSERVICE_CORE_STATE_CONNECTED__DISCONNECT);
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_req_disconnect);
+static int
+core_core_handle_ack_connect(const struct vs_client_core *_client,
+			     struct vs_client_core_state *_state,
+			     struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_DISCONNECTED__CONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+	_state->state.core.statenum = VSERVICE_CORE_STATE_CONNECTED;
+
+	if (_client->core.state_change)
+		_client->core.state_change(_state,
+					   VSERVICE_CORE_STATE_DISCONNECTED__CONNECT,
+					   VSERVICE_CORE_STATE_CONNECTED);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_client->core.ack_connect)
+		return _client->core.ack_connect(_state);
+	return 0;
+}
+
+static int
+core_core_handle_nack_connect(const struct vs_client_core *_client,
+			      struct vs_client_core_state *_state,
+			      struct vs_mbuf *_mbuf)
+{
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_DISCONNECTED__CONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+	_state->state.core.statenum = VSERVICE_CORE_STATE_DISCONNECTED;
+
+	if (_client->core.state_change)
+		_client->core.state_change(_state,
+					   VSERVICE_CORE_STATE_DISCONNECTED__CONNECT,
+					   VSERVICE_CORE_STATE_DISCONNECTED);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_client->core.nack_connect)
+		return _client->core.nack_connect(_state);
+	return 0;
+}
+
+EXPORT_SYMBOL(core_core_handle_ack_connect);
+static int
+core_core_handle_ack_disconnect(const struct vs_client_core *_client,
+				struct vs_client_core_state *_state,
+				struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+	_state->state.core.statenum = VSERVICE_CORE_STATE_DISCONNECTED;
+
+	if (_client->core.state_change)
+		_client->core.state_change(_state,
+					   VSERVICE_CORE_STATE_CONNECTED__DISCONNECT,
+					   VSERVICE_CORE_STATE_DISCONNECTED);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_client->core.ack_disconnect)
+		return _client->core.ack_disconnect(_state);
+	return 0;
+}
+
+static int
+core_core_handle_nack_disconnect(const struct vs_client_core *_client,
+				 struct vs_client_core_state *_state,
+				 struct vs_mbuf *_mbuf)
+{
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+	_state->state.core.statenum = VSERVICE_CORE_STATE_CONNECTED;
+
+	if (_client->core.state_change)
+		_client->core.state_change(_state,
+					   VSERVICE_CORE_STATE_CONNECTED__DISCONNECT,
+					   VSERVICE_CORE_STATE_CONNECTED);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_client->core.nack_disconnect)
+		return _client->core.nack_disconnect(_state);
+	return 0;
+}
+
+EXPORT_SYMBOL(core_core_handle_ack_disconnect);
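+/*
+ * startup is the message that takes the core protocol out of the OFFLINE
+ * (reset) state: it carries the in/out message quotas granted to the core
+ * service and moves the state machine to DISCONNECTED before the client's
+ * msg_startup callback runs.
+ */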
+static int
+vs_client_core_core_handle_startup(const struct vs_client_core *_client,
+				   struct vs_client_core_state *_state,
+				   struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 8UL;
+	uint32_t core_in_quota;
+	uint32_t core_out_quota;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_OFFLINE:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	_state->state.core.statenum = VSERVICE_CORE_STATE_DISCONNECTED;
+
+	if (_client->core.state_change)
+		_client->core.state_change(_state, VSERVICE_CORE_STATE_OFFLINE,
+					   VSERVICE_CORE_STATE_DISCONNECTED);
+	core_in_quota =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	core_out_quota =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_client->core.msg_startup)
+		return _client->core.msg_startup(_state, core_in_quota,
+						 core_out_quota);
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_handle_startup);
+static int
+vs_client_core_core_handle_shutdown(const struct vs_client_core *_client,
+				    struct vs_client_core_state *_state,
+				    struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_DISCONNECTED:
+	case VSERVICE_CORE_STATE_DISCONNECTED__CONNECT:
+	case VSERVICE_CORE_STATE_CONNECTED:
+	case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_DISCONNECTED:
+		_state->state.core.statenum = VSERVICE_CORE_STATE_OFFLINE;
+
+		if (_client->core.state_change)
+			_client->core.state_change(_state,
+						   VSERVICE_CORE_STATE_DISCONNECTED,
+						   VSERVICE_CORE_STATE_OFFLINE);
+		break;
+	case VSERVICE_CORE_STATE_CONNECTED:
+		_state->state.core.statenum = VSERVICE_CORE_STATE_OFFLINE;
+
+		if (_client->core.state_change)
+			_client->core.state_change(_state,
+						   VSERVICE_CORE_STATE_CONNECTED,
+						   VSERVICE_CORE_STATE_OFFLINE);
+		break;
+
+	default:
+		break;
+	}
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_client->core.msg_shutdown)
+		return _client->core.msg_shutdown(_state);
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_handle_shutdown);
+static int
+vs_client_core_core_handle_service_created(const struct vs_client_core *_client,
+					   struct vs_client_core_state *_state,
+					   struct vs_mbuf *_mbuf)
+{
+	const size_t _max_size =
+	    sizeof(vs_message_id_t) + VSERVICE_CORE_SERVICE_NAME_SIZE +
+	    VSERVICE_CORE_PROTOCOL_NAME_SIZE + 4UL;
+	uint32_t service_id;
+	struct vs_string service_name;
+	struct vs_string protocol_name;
+	const size_t _min_size = _max_size - VSERVICE_CORE_PROTOCOL_NAME_SIZE;
+	size_t _exact_size;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED:
+	case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	/* The first check is to ensure the message isn't complete garbage */
+	if ((VS_MBUF_SIZE(_mbuf) > _max_size)
+	    || (VS_MBUF_SIZE(_mbuf) < _min_size))
+		return -EBADMSG;
+	service_id =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	service_name.ptr =
+	    (char *)(VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+	service_name.max_size = VSERVICE_CORE_SERVICE_NAME_SIZE;
+
+	protocol_name.ptr =
+	    (char *)(VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+		     VSERVICE_CORE_SERVICE_NAME_SIZE + 4UL);
+	protocol_name.max_size =
+	    VS_MBUF_SIZE(_mbuf) - (sizeof(vs_message_id_t) +
+				   VSERVICE_CORE_SERVICE_NAME_SIZE + 4UL);
+
+	/* Now check the size received is the exact size expected */
+	_exact_size =
+	    _max_size - (VSERVICE_CORE_PROTOCOL_NAME_SIZE -
+			 protocol_name.max_size);
+	if (VS_MBUF_SIZE(_mbuf) != _exact_size)
+		return -EBADMSG;
+	if (_client->core.msg_service_created)
+		return _client->core.msg_service_created(_state, service_id,
+							 service_name,
+							 protocol_name, _mbuf);
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_handle_service_created);
+static int
+vs_client_core_core_handle_service_removed(const struct vs_client_core *_client,
+					   struct vs_client_core_state *_state,
+					   struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 4UL;
+	uint32_t service_id;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED:
+	case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	service_id =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_client->core.msg_service_removed)
+		return _client->core.msg_service_removed(_state, service_id);
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_handle_service_removed);
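+/*
+ * server_ready reports the transport resources assigned to a newly created
+ * service: its service_id, the in/out message quotas, and the offset and
+ * width of its incoming and outgoing notification bit ranges.
+ */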
+static int
+vs_client_core_core_handle_server_ready(const struct vs_client_core *_client,
+					struct vs_client_core_state *_state,
+					struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 28UL;
+	uint32_t service_id;
+	uint32_t in_quota;
+	uint32_t out_quota;
+	uint32_t in_bit_offset;
+	uint32_t in_num_bits;
+	uint32_t out_bit_offset;
+	uint32_t out_num_bits;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED:
+	case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	service_id =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	in_quota =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+	out_quota =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 8UL);
+	in_bit_offset =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+			   12UL);
+	in_num_bits =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+			   16UL);
+	out_bit_offset =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+			   20UL);
+	out_num_bits =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+			   24UL);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_client->core.msg_server_ready)
+		return _client->core.msg_server_ready(_state, service_id,
+						      in_quota, out_quota,
+						      in_bit_offset,
+						      in_num_bits,
+						      out_bit_offset,
+						      out_num_bits);
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_handle_server_ready);
+static int
+vs_client_core_core_handle_service_reset(const struct vs_client_core *_client,
+					 struct vs_client_core_state *_state,
+					 struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 4UL;
+	uint32_t service_id;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	service_id =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_client->core.msg_service_reset)
+		return _client->core.msg_service_reset(_state, service_id);
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_handle_service_reset);
+int
+vs_client_core_core_send_service_reset(struct vs_client_core_state *_state,
+				       uint32_t service_id, gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 4UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_client_core *_client =
+	    to_client_driver(vsdrv)->client;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_CORE_CORE_MSG_SERVICE_RESET;
+
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+	    service_id;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_send_service_reset);
+static int
+core_handle_message(struct vs_service_device *service, struct vs_mbuf *_mbuf)
+{
+	vs_message_id_t message_id;
+	__maybe_unused struct vs_client_core_state *state =
+	    dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	__maybe_unused struct vs_client_core *client =
+	    to_client_driver(vsdrv)->client;
+
+	int ret;
+
+	/* Extract the message ID */
+	if (VS_MBUF_SIZE(_mbuf) < sizeof(message_id)) {
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Invalid message size %zd\n",
+			__func__, __LINE__, VS_MBUF_SIZE(_mbuf));
+
+		return -EBADMSG;
+	}
+
+	message_id = *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf));
+
+	switch (message_id) {
+
+/** interface core **/
+/* command in sync connect */
+	case VSERVICE_CORE_CORE_ACK_CONNECT:
+		ret = core_core_handle_ack_connect(client, state, _mbuf);
+		break;
+	case VSERVICE_CORE_CORE_NACK_CONNECT:
+		ret = core_core_handle_nack_connect(client, state, _mbuf);
+		break;
+
+/* command in sync disconnect */
+	case VSERVICE_CORE_CORE_ACK_DISCONNECT:
+		ret = core_core_handle_ack_disconnect(client, state, _mbuf);
+		break;
+	case VSERVICE_CORE_CORE_NACK_DISCONNECT:
+		ret = core_core_handle_nack_disconnect(client, state, _mbuf);
+		break;
+
+/* message startup */
+	case VSERVICE_CORE_CORE_MSG_STARTUP:
+		ret = vs_client_core_core_handle_startup(client, state, _mbuf);
+		break;
+
+/* message shutdown */
+	case VSERVICE_CORE_CORE_MSG_SHUTDOWN:
+		ret = vs_client_core_core_handle_shutdown(client, state, _mbuf);
+		break;
+
+/* message service_created */
+	case VSERVICE_CORE_CORE_MSG_SERVICE_CREATED:
+		ret =
+		    vs_client_core_core_handle_service_created(client, state,
+							       _mbuf);
+		break;
+
+/* message service_removed */
+	case VSERVICE_CORE_CORE_MSG_SERVICE_REMOVED:
+		ret =
+		    vs_client_core_core_handle_service_removed(client, state,
+							       _mbuf);
+		break;
+
+/* message server_ready */
+	case VSERVICE_CORE_CORE_MSG_SERVER_READY:
+		ret =
+		    vs_client_core_core_handle_server_ready(client, state,
+							    _mbuf);
+		break;
+
+/* message service_reset */
+	case VSERVICE_CORE_CORE_MSG_SERVICE_RESET:
+		ret =
+		    vs_client_core_core_handle_service_reset(client, state,
+							     _mbuf);
+		break;
+
+	default:
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Unknown message type %d\n",
+			__func__, __LINE__, (int)message_id);
+
+		ret = -EPROTO;
+		break;
+	}
+
+	if (ret) {
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Handler for message type %d returned %d\n",
+			__func__, __LINE__, (int)message_id, ret);
+
+	}
+
+	return ret;
+}
+
+static void core_handle_notify(struct vs_service_device *service,
+			       uint32_t notify_bits)
+{
+	__maybe_unused struct vs_client_core_state *state =
+	    dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	__maybe_unused struct vs_client_core *client =
+	    to_client_driver(vsdrv)->client;
+
+	uint32_t bits = notify_bits;
+	int ret;
+
+	while (bits) {
+		uint32_t not = __ffs(bits);
+		switch (not) {
+
+    /** interface core **/
+
+		default:
+			dev_err(&state->service->dev,
+				"[%s:%d] Protocol error: Unknown notification %d\n",
+				__func__, __LINE__, (int)not);
+
+			ret = -EPROTO;
+			break;
+
+		}
+		bits &= ~(1 << not);
+		if (ret) {
+			dev_err(&state->service->dev,
+				"[%s:%d] Protocol error: Handler for notification %d returned %d\n",
+				__func__, __LINE__, (int)not, ret);
+
+		}
+	}
+}
+
+MODULE_DESCRIPTION("OKL4 Virtual Services coreClient Protocol Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/protocol/core/server.c b/drivers/vservices/protocol/core/server.c
new file mode 100644
index 0000000..c3f3686
--- /dev/null
+++ b/drivers/vservices/protocol/core/server.c
@@ -0,0 +1,1226 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+ /*
+  * This is the generated code for the core server protocol handling.
+  */
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
+#include <linux/export.h>
+#endif
+
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/protocol/core/types.h>
+#include <vservices/protocol/core/common.h>
+#include <vservices/protocol/core/server.h>
+#include <vservices/service.h>
+
+#include "../../transport.h"
+
+#define VS_MBUF_SIZE(mbuf) mbuf->size
+#define VS_MBUF_DATA(mbuf) mbuf->data
+#define VS_STATE_SERVICE_PTR(state) state->service
+
+/*** Linux driver model integration ***/
+struct vs_core_server_driver {
+	struct vs_server_core *server;
+	struct list_head list;
+	struct vs_service_driver vsdrv;
+};
+
+#define to_server_driver(d) \
+        container_of(d, struct vs_core_server_driver, vsdrv)
+
+static void core_handle_start(struct vs_service_device *service)
+{
+
+	struct vs_server_core_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_core *server __maybe_unused =
+	    to_server_driver(vsdrv)->server;
+
+	vs_service_state_lock(service);
+	state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+	if (server->start)
+		server->start(state);
+	vs_service_state_unlock(service);
+}
+
+static void core_handle_reset(struct vs_service_device *service)
+{
+
+	struct vs_server_core_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_core *server __maybe_unused =
+	    to_server_driver(vsdrv)->server;
+
+	vs_service_state_lock(service);
+	state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+	if (server->reset)
+		server->reset(state);
+	vs_service_state_unlock(service);
+}
+
+static void core_handle_start_bh(struct vs_service_device *service)
+{
+
+	struct vs_server_core_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_core *server __maybe_unused =
+	    to_server_driver(vsdrv)->server;
+
+	vs_service_state_lock_bh(service);
+	state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+	if (server->start)
+		server->start(state);
+	vs_service_state_unlock_bh(service);
+}
+
+static void core_handle_reset_bh(struct vs_service_device *service)
+{
+
+	struct vs_server_core_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_core *server __maybe_unused =
+	    to_server_driver(vsdrv)->server;
+
+	vs_service_state_lock_bh(service);
+	state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+	if (server->reset)
+		server->reset(state);
+	vs_service_state_unlock_bh(service);
+}
+
+static int core_server_probe(struct vs_service_device *service);
+static int core_server_remove(struct vs_service_device *service);
+static int core_handle_message(struct vs_service_device *service,
+			       struct vs_mbuf *_mbuf);
+static void core_handle_notify(struct vs_service_device *service,
+			       uint32_t flags);
+static void core_handle_start(struct vs_service_device *service);
+static void core_handle_start_bh(struct vs_service_device *service);
+static void core_handle_reset(struct vs_service_device *service);
+static void core_handle_reset_bh(struct vs_service_device *service);
+static int core_handle_tx_ready(struct vs_service_device *service);
+
+int __vservice_core_server_register(struct vs_server_core *server,
+				    const char *name, struct module *owner)
+{
+	int ret;
+	struct vs_core_server_driver *driver;
+
+	if (server->tx_atomic && !server->rx_atomic)
+		return -EINVAL;
+
+	driver = kzalloc(sizeof(*driver), GFP_KERNEL);
+	if (!driver) {
+		ret = -ENOMEM;
+		goto fail_alloc_driver;
+	}
+
+	server->driver = &driver->vsdrv;
+	driver->server = server;
+
+	driver->vsdrv.protocol = VSERVICE_CORE_PROTOCOL_NAME;
+
+	driver->vsdrv.is_server = true;
+	driver->vsdrv.rx_atomic = server->rx_atomic;
+	driver->vsdrv.tx_atomic = server->tx_atomic;
+	/* FIXME Jira ticket SDK-2835 - philipd. */
+	driver->vsdrv.in_quota_min = 1;
+	driver->vsdrv.in_quota_best = server->in_quota_best ?
+	    server->in_quota_best : driver->vsdrv.in_quota_min;
+	/* FIXME Jira ticket SDK-2835 - philipd. */
+	driver->vsdrv.out_quota_min = 1;
+	driver->vsdrv.out_quota_best = server->out_quota_best ?
+	    server->out_quota_best : driver->vsdrv.out_quota_min;
+	driver->vsdrv.in_notify_count = VSERVICE_CORE_NBIT_IN__COUNT;
+	driver->vsdrv.out_notify_count = VSERVICE_CORE_NBIT_OUT__COUNT;
+
+	driver->vsdrv.probe = core_server_probe;
+	driver->vsdrv.remove = core_server_remove;
+	driver->vsdrv.receive = core_handle_message;
+	driver->vsdrv.notify = core_handle_notify;
+	driver->vsdrv.start = server->tx_atomic ?
+	    core_handle_start_bh : core_handle_start;
+	driver->vsdrv.reset = server->tx_atomic ?
+	    core_handle_reset_bh : core_handle_reset;
+	driver->vsdrv.tx_ready = core_handle_tx_ready;
+	driver->vsdrv.out_notify_count = 0;
+	driver->vsdrv.in_notify_count = 0;
+	driver->vsdrv.driver.name = name;
+	driver->vsdrv.driver.owner = owner;
+	driver->vsdrv.driver.bus = &vs_server_bus_type;
+
+	ret = driver_register(&driver->vsdrv.driver);
+
+	if (ret) {
+		goto fail_driver_register;
+	}
+
+	return 0;
+
+ fail_driver_register:
+	server->driver = NULL;
+	kfree(driver);
+ fail_alloc_driver:
+	return ret;
+}
+
+EXPORT_SYMBOL(__vservice_core_server_register);
+
+int vservice_core_server_unregister(struct vs_server_core *server)
+{
+	struct vs_core_server_driver *driver;
+
+	if (!server->driver)
+		return 0;
+
+	driver = to_server_driver(server->driver);
+	driver_unregister(&driver->vsdrv.driver);
+
+	server->driver = NULL;
+	kfree(driver);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vservice_core_server_unregister);
+
+static int core_server_probe(struct vs_service_device *service)
+{
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_core *server = to_server_driver(vsdrv)->server;
+	struct vs_server_core_state *state;
+
+	state = server->alloc(service);
+	if (!state)
+		return -ENOMEM;
+	else if (IS_ERR(state))
+		return PTR_ERR(state);
+
+	state->service = vs_get_service(service);
+	state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+
+	dev_set_drvdata(&service->dev, state);
+
+	return 0;
+}
+
+static int core_server_remove(struct vs_service_device *service)
+{
+	struct vs_server_core_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_core *server = to_server_driver(vsdrv)->server;
+
+	state->released = true;
+	dev_set_drvdata(&service->dev, NULL);
+	server->release(state);
+
+	vs_put_service(service);
+
+	return 0;
+}
+
+static int core_handle_tx_ready(struct vs_service_device *service)
+{
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_core *server = to_server_driver(vsdrv)->server;
+	struct vs_server_core_state *state = dev_get_drvdata(&service->dev);
+
+	if (server->tx_ready)
+		server->tx_ready(state);
+
+	return 0;
+}
+
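+/*
+ * service_created is a variable-length message: the buffer allocated below
+ * reserves the full VSERVICE_CORE_SERVICE_NAME_SIZE bytes for the service
+ * name and up to VSERVICE_CORE_PROTOCOL_NAME_SIZE bytes for the protocol
+ * name.  The message may be sent shorter than the full allocation; the
+ * receiver accepts anything between the minimum and maximum size and
+ * derives the protocol-name length from the received size.
+ */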
+struct vs_mbuf *vs_server_core_core_alloc_service_created(struct
+							  vs_server_core_state
+							  *_state,
+							  struct vs_string
+							  *service_name,
+							  struct vs_string
+							  *protocol_name,
+							  gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+	const vs_message_id_t _msg_id = VSERVICE_CORE_CORE_MSG_SERVICE_CREATED;
+	const uint32_t _msg_size =
+	    sizeof(vs_message_id_t) + VSERVICE_CORE_SERVICE_NAME_SIZE +
+	    VSERVICE_CORE_PROTOCOL_NAME_SIZE + 4UL;
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return _mbuf;
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+		return ERR_PTR(-ENOMEM);
+	}
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) = _msg_id;
+
+	if (!service_name)
+		goto fail;
+	service_name->ptr =
+	    (char *)(VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+	service_name->max_size = VSERVICE_CORE_SERVICE_NAME_SIZE;
+	if (!protocol_name)
+		goto fail;
+	protocol_name->ptr =
+	    (char *)(VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+		     VSERVICE_CORE_SERVICE_NAME_SIZE + 4UL);
+	protocol_name->max_size = VSERVICE_CORE_PROTOCOL_NAME_SIZE;
+
+	return _mbuf;
+
+ fail:
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	return NULL;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_alloc_service_created);
+int vs_server_core_core_free_service_created(struct vs_server_core_state
+					     *_state,
+					     struct vs_string *service_name,
+					     struct vs_string *protocol_name,
+					     struct vs_mbuf *_mbuf)
+{
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_free_service_created);
+int
+vs_server_core_core_send_ack_connect(struct vs_server_core_state *_state,
+				     gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_core *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_DISCONNECTED__CONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_CORE_CORE_ACK_CONNECT;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.core.statenum = VSERVICE_CORE_STATE_CONNECTED;
+
+	if (_server->core.state_change)
+		_server->core.state_change(_state,
+					   VSERVICE_CORE_STATE_DISCONNECTED__CONNECT,
+					   VSERVICE_CORE_STATE_CONNECTED);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_ack_connect);
+int
+vs_server_core_core_send_nack_connect(struct vs_server_core_state *_state,
+				      gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_core *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_DISCONNECTED__CONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_CORE_CORE_NACK_CONNECT;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.core.statenum = VSERVICE_CORE_STATE_DISCONNECTED;
+
+	if (_server->core.state_change)
+		_server->core.state_change(_state,
+					   VSERVICE_CORE_STATE_DISCONNECTED__CONNECT,
+					   VSERVICE_CORE_STATE_DISCONNECTED);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_nack_connect);
+int
+vs_server_core_core_send_ack_disconnect(struct vs_server_core_state *_state,
+					gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_core *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_CORE_CORE_ACK_DISCONNECT;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.core.statenum = VSERVICE_CORE_STATE_DISCONNECTED;
+
+	if (_server->core.state_change)
+		_server->core.state_change(_state,
+					   VSERVICE_CORE_STATE_CONNECTED__DISCONNECT,
+					   VSERVICE_CORE_STATE_DISCONNECTED);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_ack_disconnect);
+int
+vs_server_core_core_send_nack_disconnect(struct vs_server_core_state *_state,
+					 gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_core *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_CORE_CORE_NACK_DISCONNECT;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.core.statenum = VSERVICE_CORE_STATE_CONNECTED;
+
+	if (_server->core.state_change)
+		_server->core.state_change(_state,
+					   VSERVICE_CORE_STATE_CONNECTED__DISCONNECT,
+					   VSERVICE_CORE_STATE_CONNECTED);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_nack_disconnect);
+static int
+vs_server_core_core_handle_req_connect(const struct vs_server_core *_server,
+				       struct vs_server_core_state *_state,
+				       struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_DISCONNECTED:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+	_state->state.core.statenum = VSERVICE_CORE_STATE_DISCONNECTED__CONNECT;
+
+	if (_server->core.state_change)
+		_server->core.state_change(_state,
+					   VSERVICE_CORE_STATE_DISCONNECTED,
+					   VSERVICE_CORE_STATE_DISCONNECTED__CONNECT);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_server->core.req_connect)
+		return _server->core.req_connect(_state);
+	else
+		dev_warn(&_state->service->dev,
+			 "[%s:%d] Protocol warning: No handler registered for _server->core.req_connect, command will never be acknowledged\n",
+			 __func__, __LINE__);
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_handle_req_connect);
+static int
+vs_server_core_core_handle_req_disconnect(const struct vs_server_core *_server,
+					  struct vs_server_core_state *_state,
+					  struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+	_state->state.core.statenum = VSERVICE_CORE_STATE_CONNECTED__DISCONNECT;
+
+	if (_server->core.state_change)
+		_server->core.state_change(_state,
+					   VSERVICE_CORE_STATE_CONNECTED,
+					   VSERVICE_CORE_STATE_CONNECTED__DISCONNECT);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_server->core.req_disconnect)
+		return _server->core.req_disconnect(_state);
+	else
+		dev_warn(&_state->service->dev,
+			 "[%s:%d] Protocol warning: No handler registered for _server->core.req_disconnect, command will never be acknowledged\n",
+			 __func__, __LINE__);
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_handle_req_disconnect);
+int
+vs_server_core_core_send_startup(struct vs_server_core_state *_state,
+				 uint32_t core_in_quota,
+				 uint32_t core_out_quota, gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 8UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_core *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_OFFLINE:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_CORE_CORE_MSG_STARTUP;
+
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+	    core_in_quota;
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL) =
+	    core_out_quota;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.core.statenum = VSERVICE_CORE_STATE_DISCONNECTED;
+
+	if (_server->core.state_change)
+		_server->core.state_change(_state, VSERVICE_CORE_STATE_OFFLINE,
+					   VSERVICE_CORE_STATE_DISCONNECTED);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_startup);
+int
+vs_server_core_core_send_shutdown(struct vs_server_core_state *_state,
+				  gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_core *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_DISCONNECTED:
+	case VSERVICE_CORE_STATE_DISCONNECTED__CONNECT:
+	case VSERVICE_CORE_STATE_CONNECTED:
+	case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_CORE_CORE_MSG_SHUTDOWN;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_DISCONNECTED:
+		_state->state.core.statenum = VSERVICE_CORE_STATE_OFFLINE;
+
+		if (_server->core.state_change)
+			_server->core.state_change(_state,
+						   VSERVICE_CORE_STATE_DISCONNECTED,
+						   VSERVICE_CORE_STATE_OFFLINE);
+		break;
+	case VSERVICE_CORE_STATE_CONNECTED:
+		_state->state.core.statenum = VSERVICE_CORE_STATE_OFFLINE;
+
+		if (_server->core.state_change)
+			_server->core.state_change(_state,
+						   VSERVICE_CORE_STATE_CONNECTED,
+						   VSERVICE_CORE_STATE_OFFLINE);
+		break;
+
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_shutdown);
+int
+vs_server_core_core_send_service_created(struct vs_server_core_state *_state,
+					 uint32_t service_id,
+					 struct vs_string service_name,
+					 struct vs_string protocol_name,
+					 struct vs_mbuf *_mbuf)
+{
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_core *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED:
+	case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+	if (*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) !=
+	    VSERVICE_CORE_CORE_MSG_SERVICE_CREATED)
+
+		return -EINVAL;
+
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+	    service_id;
+	{
+		size_t _size = strnlen(service_name.ptr, service_name.max_size);
+		if ((_size + sizeof(vs_message_id_t) + 4UL) >
+		    VS_MBUF_SIZE(_mbuf))
+			return -EINVAL;
+
+		memset(service_name.ptr + _size, 0,
+		       service_name.max_size - _size);
+	}
+	{
+		size_t _size =
+		    strnlen(protocol_name.ptr, protocol_name.max_size);
+		if ((_size + sizeof(vs_message_id_t) +
+		     VSERVICE_CORE_SERVICE_NAME_SIZE + 4UL) >
+		    VS_MBUF_SIZE(_mbuf))
+			return -EINVAL;
+
+		if (_size < protocol_name.max_size)
+			VS_MBUF_SIZE(_mbuf) -= (protocol_name.max_size - _size);
+
+	}
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_service_created);
+int
+vs_server_core_core_send_service_removed(struct vs_server_core_state *_state,
+					 uint32_t service_id, gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 4UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_core *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED:
+	case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_CORE_CORE_MSG_SERVICE_REMOVED;
+
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+	    service_id;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_service_removed);
+int
+vs_server_core_core_send_server_ready(struct vs_server_core_state *_state,
+				      uint32_t service_id, uint32_t in_quota,
+				      uint32_t out_quota,
+				      uint32_t in_bit_offset,
+				      uint32_t in_num_bits,
+				      uint32_t out_bit_offset,
+				      uint32_t out_num_bits, gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 28UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_core *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED:
+	case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_CORE_CORE_MSG_SERVER_READY;
+
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+	    service_id;
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL) =
+	    in_quota;
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 8UL) =
+	    out_quota;
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 12UL) =
+	    in_bit_offset;
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 16UL) =
+	    in_num_bits;
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 20UL) =
+	    out_bit_offset;
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 24UL) =
+	    out_num_bits;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_server_ready);
+int
+vs_server_core_core_send_service_reset(struct vs_server_core_state *_state,
+				       uint32_t service_id, gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 4UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_core *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_CORE_CORE_MSG_SERVICE_RESET;
+
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+	    service_id;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_service_reset);
+static int
+vs_server_core_core_handle_service_reset(const struct vs_server_core *_server,
+					 struct vs_server_core_state *_state,
+					 struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 4UL;
+	uint32_t service_id;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	service_id =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_server->core.msg_service_reset)
+		return _server->core.msg_service_reset(_state, service_id);
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_handle_service_reset);
+static int
+core_handle_message(struct vs_service_device *service, struct vs_mbuf *_mbuf)
+{
+	vs_message_id_t message_id;
+	__maybe_unused struct vs_server_core_state *state =
+	    dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	__maybe_unused struct vs_server_core *server =
+	    to_server_driver(vsdrv)->server;
+
+	int ret;
+
+	/* Extract the message ID */
+	if (VS_MBUF_SIZE(_mbuf) < sizeof(message_id)) {
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Invalid message size %zd\n",
+			__func__, __LINE__, VS_MBUF_SIZE(_mbuf));
+
+		return -EBADMSG;
+	}
+
+	message_id = *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf));
+
+	switch (message_id) {
+
+/** interface core **/
+/* command in sync connect */
+	case VSERVICE_CORE_CORE_REQ_CONNECT:
+		ret =
+		    vs_server_core_core_handle_req_connect(server, state,
+							   _mbuf);
+		break;
+
+/* command in sync disconnect */
+	case VSERVICE_CORE_CORE_REQ_DISCONNECT:
+		ret =
+		    vs_server_core_core_handle_req_disconnect(server, state,
+							      _mbuf);
+		break;
+
+/* message service_reset */
+	case VSERVICE_CORE_CORE_MSG_SERVICE_RESET:
+		ret =
+		    vs_server_core_core_handle_service_reset(server, state,
+							     _mbuf);
+		break;
+
+	default:
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Unknown message type %d\n",
+			__func__, __LINE__, (int)message_id);
+
+		ret = -EPROTO;
+		break;
+	}
+
+	if (ret) {
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Handler for message type %d returned %d\n",
+			__func__, __LINE__, (int)message_id, ret);
+
+	}
+
+	return ret;
+}
+
+static void core_handle_notify(struct vs_service_device *service,
+			       uint32_t notify_bits)
+{
+	__maybe_unused struct vs_server_core_state *state =
+	    dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	__maybe_unused struct vs_server_core *server =
+	    to_server_driver(vsdrv)->server;
+
+	uint32_t bits = notify_bits;
+	int ret;
+
+	while (bits) {
+		uint32_t not = __ffs(bits);
+		switch (not) {
+
+    /** interface core **/
+
+		default:
+			dev_err(&state->service->dev,
+				"[%s:%d] Protocol error: Unknown notification %d\n",
+				__func__, __LINE__, (int)not);
+
+			ret = -EPROTO;
+			break;
+
+		}
+		bits &= ~(1 << not);
+		if (ret) {
+			dev_err(&state->service->dev,
+				"[%s:%d] Protocol error: Handler for notification %d returned %d\n",
+				__func__, __LINE__, (int)not, ret);
+
+		}
+	}
+}
+
+MODULE_DESCRIPTION("OKL4 Virtual Services core Server Protocol Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/protocol/serial/Makefile b/drivers/vservices/protocol/serial/Makefile
new file mode 100644
index 0000000..f5f29ed
--- /dev/null
+++ b/drivers/vservices/protocol/serial/Makefile
@@ -0,0 +1,7 @@
+ccflags-y += -Werror
+
+obj-$(CONFIG_VSERVICES_PROTOCOL_SERIAL_CLIENT) += vservices_protocol_serial_client.o
+vservices_protocol_serial_client-objs = client.o
+
+obj-$(CONFIG_VSERVICES_PROTOCOL_SERIAL_SERVER) += vservices_protocol_serial_server.o
+vservices_protocol_serial_server-objs = server.o
diff --git a/drivers/vservices/protocol/serial/client.c b/drivers/vservices/protocol/serial/client.c
new file mode 100644
index 0000000..1c37e72
--- /dev/null
+++ b/drivers/vservices/protocol/serial/client.c
@@ -0,0 +1,925 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+ /*
+  * This is the generated code for the serial client protocol handling.
+  */
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
+#include <linux/export.h>
+#endif
+
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/protocol/serial/types.h>
+#include <vservices/protocol/serial/common.h>
+#include <vservices/protocol/serial/client.h>
+#include <vservices/service.h>
+
+#include "../../transport.h"
+
+#define VS_MBUF_SIZE(mbuf) mbuf->size
+#define VS_MBUF_DATA(mbuf) mbuf->data
+#define VS_STATE_SERVICE_PTR(state) state->service
+
+static int _vs_client_serial_req_open(struct vs_client_serial_state *_state);
+
+/*** Linux driver model integration ***/
+struct vs_serial_client_driver {
+	struct vs_client_serial *client;
+	struct list_head list;
+	struct vs_service_driver vsdrv;
+};
+
+#define to_client_driver(d) \
+        container_of(d, struct vs_serial_client_driver, vsdrv)
+
+static void reset_nack_requests(struct vs_service_device *service)
+{
+
+}
+
+static void serial_handle_start(struct vs_service_device *service)
+{
+
+	struct vs_client_serial_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_serial *client __maybe_unused =
+	    to_client_driver(vsdrv)->client;
+
+	vs_service_state_lock(service);
+	state->state = VSERVICE_SERIAL_PROTOCOL_RESET_STATE;
+
+	_vs_client_serial_req_open(state);
+
+	vs_service_state_unlock(service);
+}
+
+static void serial_handle_reset(struct vs_service_device *service)
+{
+
+	struct vs_client_serial_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_serial *client __maybe_unused =
+	    to_client_driver(vsdrv)->client;
+
+	vs_service_state_lock(service);
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base)) {
+		vs_service_state_unlock(service);
+		return;
+	}
+	state->state.base = VSERVICE_BASE_RESET_STATE;
+	reset_nack_requests(service);
+	if (client->closed)
+		client->closed(state);
+
+	state->state = VSERVICE_SERIAL_PROTOCOL_RESET_STATE;
+
+	vs_service_state_unlock(service);
+}
+
+static void serial_handle_start_bh(struct vs_service_device *service)
+{
+
+	struct vs_client_serial_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_serial *client __maybe_unused =
+	    to_client_driver(vsdrv)->client;
+
+	vs_service_state_lock_bh(service);
+	state->state = VSERVICE_SERIAL_PROTOCOL_RESET_STATE;
+
+	_vs_client_serial_req_open(state);
+
+	vs_service_state_unlock_bh(service);
+}
+
+static void serial_handle_reset_bh(struct vs_service_device *service)
+{
+
+	struct vs_client_serial_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_serial *client __maybe_unused =
+	    to_client_driver(vsdrv)->client;
+
+	vs_service_state_lock_bh(service);
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base)) {
+		vs_service_state_unlock_bh(service);
+		return;
+	}
+	state->state.base = VSERVICE_BASE_RESET_STATE;
+	reset_nack_requests(service);
+	if (client->closed)
+		client->closed(state);
+
+	state->state = VSERVICE_SERIAL_PROTOCOL_RESET_STATE;
+
+	vs_service_state_unlock_bh(service);
+}
+
+static int serial_client_probe(struct vs_service_device *service);
+static int serial_client_remove(struct vs_service_device *service);
+static int serial_handle_message(struct vs_service_device *service,
+				 struct vs_mbuf *_mbuf);
+static void serial_handle_notify(struct vs_service_device *service,
+				 uint32_t flags);
+static void serial_handle_start(struct vs_service_device *service);
+static void serial_handle_start_bh(struct vs_service_device *service);
+static void serial_handle_reset(struct vs_service_device *service);
+static void serial_handle_reset_bh(struct vs_service_device *service);
+static int serial_handle_tx_ready(struct vs_service_device *service);
+
+int __vservice_serial_client_register(struct vs_client_serial *client,
+				      const char *name, struct module *owner)
+{
+	int ret;
+	struct vs_serial_client_driver *driver;
+
+	if (client->tx_atomic && !client->rx_atomic)
+		return -EINVAL;
+
+	driver = kzalloc(sizeof(*driver), GFP_KERNEL);
+	if (!driver) {
+		ret = -ENOMEM;
+		goto fail_alloc_driver;
+	}
+
+	client->driver = &driver->vsdrv;
+	driver->client = client;
+
+	driver->vsdrv.protocol = VSERVICE_SERIAL_PROTOCOL_NAME;
+
+	driver->vsdrv.is_server = false;
+	driver->vsdrv.rx_atomic = client->rx_atomic;
+	driver->vsdrv.tx_atomic = client->tx_atomic;
+
+	driver->vsdrv.probe = serial_client_probe;
+	driver->vsdrv.remove = serial_client_remove;
+	driver->vsdrv.receive = serial_handle_message;
+	driver->vsdrv.notify = serial_handle_notify;
+	driver->vsdrv.start = client->tx_atomic ?
+	    serial_handle_start_bh : serial_handle_start;
+	driver->vsdrv.reset = client->tx_atomic ?
+	    serial_handle_reset_bh : serial_handle_reset;
+	driver->vsdrv.tx_ready = serial_handle_tx_ready;
+	driver->vsdrv.out_notify_count = 0;
+	driver->vsdrv.in_notify_count = 0;
+	driver->vsdrv.driver.name = name;
+	driver->vsdrv.driver.owner = owner;
+	driver->vsdrv.driver.bus = &vs_client_bus_type;
+
+	ret = driver_register(&driver->vsdrv.driver);
+
+	if (ret) {
+		goto fail_driver_register;
+	}
+
+	return 0;
+
+ fail_driver_register:
+	client->driver = NULL;
+	kfree(driver);
+ fail_alloc_driver:
+	return ret;
+}
+
+EXPORT_SYMBOL(__vservice_serial_client_register);
+
+int vservice_serial_client_unregister(struct vs_client_serial *client)
+{
+	struct vs_serial_client_driver *driver;
+
+	if (!client->driver)
+		return 0;
+
+	driver = to_client_driver(client->driver);
+	driver_unregister(&driver->vsdrv.driver);
+
+	client->driver = NULL;
+	kfree(driver);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vservice_serial_client_unregister);
+
+static int serial_client_probe(struct vs_service_device *service)
+{
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_serial *client = to_client_driver(vsdrv)->client;
+	struct vs_client_serial_state *state;
+
+	state = client->alloc(service);
+	if (!state)
+		return -ENOMEM;
+	else if (IS_ERR(state))
+		return PTR_ERR(state);
+
+	state->service = vs_get_service(service);
+	state->state = VSERVICE_SERIAL_PROTOCOL_RESET_STATE;
+
+	dev_set_drvdata(&service->dev, state);
+
+	return 0;
+}
+
+static int serial_client_remove(struct vs_service_device *service)
+{
+	struct vs_client_serial_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_serial *client = to_client_driver(vsdrv)->client;
+
+	state->released = true;
+	dev_set_drvdata(&service->dev, NULL);
+	client->release(state);
+
+	vs_put_service(service);
+
+	return 0;
+}
+
+static int serial_handle_tx_ready(struct vs_service_device *service)
+{
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_serial *client = to_client_driver(vsdrv)->client;
+	struct vs_client_serial_state *state = dev_get_drvdata(&service->dev);
+
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base))
+		return 0;
+
+	if (client->tx_ready)
+		client->tx_ready(state);
+
+	return 0;
+}
+
+static int _vs_client_serial_req_open(struct vs_client_serial_state *_state)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_client_serial *_client =
+	    to_client_driver(vsdrv)->client;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_CLOSED:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  vs_service_has_atomic_rx(VS_STATE_SERVICE_PTR
+							   (_state)) ?
+				  GFP_ATOMIC : GFP_KERNEL);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_SERIAL_BASE_REQ_OPEN;
+
+	_state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED__OPEN;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(_vs_client_serial_req_open);
+static int _vs_client_serial_req_close(struct vs_client_serial_state *_state)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_client_serial *_client =
+	    to_client_driver(vsdrv)->client;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  vs_service_has_atomic_rx(VS_STATE_SERVICE_PTR
+							   (_state)) ?
+				  GFP_ATOMIC : GFP_KERNEL);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_SERIAL_BASE_REQ_CLOSE;
+
+	_state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING__CLOSE;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(_vs_client_serial_req_close);
+static int _vs_client_serial_req_reopen(struct vs_client_serial_state *_state)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_client_serial *_client =
+	    to_client_driver(vsdrv)->client;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  vs_service_has_atomic_rx(VS_STATE_SERVICE_PTR
+							   (_state)) ?
+				  GFP_ATOMIC : GFP_KERNEL);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_SERIAL_BASE_REQ_REOPEN;
+
+	_state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING__REOPEN;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(_vs_client_serial_req_reopen);
+static int
+serial_base_handle_ack_open(const struct vs_client_serial *_client,
+			    struct vs_client_serial_state *_state,
+			    struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 4UL;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_CLOSED__OPEN:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+	_state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING;
+	_state->serial.packet_size =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	_state->packet_size =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	_client->opened(_state);
+	return 0;
+
+}
+
+static int
+serial_base_handle_nack_open(const struct vs_client_serial *_client,
+			     struct vs_client_serial_state *_state,
+			     struct vs_mbuf *_mbuf)
+{
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_CLOSED__OPEN:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+	_state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED;
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	dev_err(&VS_STATE_SERVICE_PTR(_state)->dev,
+		"Open operation failed for device %s\n",
+		VS_STATE_SERVICE_PTR(_state)->name);
+
+	return 0;
+
+}
+
+EXPORT_SYMBOL(serial_base_handle_ack_open);
+static int
+serial_base_handle_ack_close(const struct vs_client_serial *_client,
+			     struct vs_client_serial_state *_state,
+			     struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING__CLOSE:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+	_state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED;
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	wake_up_all(&_state->service->quota_wq);
+	_client->closed(_state);
+	return 0;
+
+}
+
+static int
+serial_base_handle_nack_close(const struct vs_client_serial *_client,
+			      struct vs_client_serial_state *_state,
+			      struct vs_mbuf *_mbuf)
+{
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING__CLOSE:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+	_state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING;
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	wake_up_all(&_state->service->quota_wq);
+	_client->closed(_state);
+	return 0;
+
+}
+
+EXPORT_SYMBOL(serial_base_handle_ack_close);
+static int
+serial_base_handle_ack_reopen(const struct vs_client_serial *_client,
+			      struct vs_client_serial_state *_state,
+			      struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING__REOPEN:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+	_state->state.base.statenum = VSERVICE_BASE__RESET;
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_client->reopened) {
+		_client->reopened(_state);
+		return 0;
+	}
+	wake_up_all(&_state->service->quota_wq);
+	_client->closed(_state);
+	return _vs_client_serial_req_open(_state);
+
+}
+
+static int
+serial_base_handle_nack_reopen(const struct vs_client_serial *_client,
+			       struct vs_client_serial_state *_state,
+			       struct vs_mbuf *_mbuf)
+{
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING__REOPEN:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	return 0;
+
+}
+
+EXPORT_SYMBOL(serial_base_handle_ack_reopen);
+struct vs_mbuf *vs_client_serial_serial_alloc_msg(struct vs_client_serial_state
+						  *_state, struct vs_pbuf *b,
+						  gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+	const vs_message_id_t _msg_id = VSERVICE_SERIAL_SERIAL_MSG_MSG;
+	const uint32_t _msg_size =
+	    sizeof(vs_message_id_t) + _state->serial.packet_size + 4UL;
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return _mbuf;
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+		return ERR_PTR(-ENOMEM);
+	}
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) = _msg_id;
+
+	if (!b)
+		goto fail;
+	b->data =
+	    (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL +
+			   sizeof(uint32_t));
+	b->size = _state->serial.packet_size;
+	b->max_size = b->size;
+	return _mbuf;
+
+ fail:
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	return NULL;
+}
+
+EXPORT_SYMBOL(vs_client_serial_serial_alloc_msg);
+int vs_client_serial_serial_getbufs_msg(struct vs_client_serial_state *_state,
+					struct vs_pbuf *b,
+					struct vs_mbuf *_mbuf)
+{
+	const vs_message_id_t _msg_id = VSERVICE_SERIAL_SERIAL_MSG_MSG;
+	const size_t _max_size =
+	    sizeof(vs_message_id_t) + _state->serial.packet_size + 4UL;
+	const size_t _min_size = _max_size - _state->serial.packet_size;
+	size_t _exact_size;
+
+	if (*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) != _msg_id)
+		return -EINVAL;
+	if ((VS_MBUF_SIZE(_mbuf) > _max_size)
+	    || (VS_MBUF_SIZE(_mbuf) < _min_size))
+		return -EBADMSG;
+
+	b->size =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	b->data =
+	    (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL +
+			   sizeof(uint32_t));
+	b->max_size = b->size;
+
+	/* Now check the size received is the exact size expected */
+	_exact_size = _max_size - (_state->serial.packet_size - b->size);
+	if (VS_MBUF_SIZE(_mbuf) != _exact_size)
+		return -EBADMSG;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_serial_serial_getbufs_msg);
+int vs_client_serial_serial_free_msg(struct vs_client_serial_state *_state,
+				     struct vs_pbuf *b, struct vs_mbuf *_mbuf)
+{
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_serial_serial_free_msg);
+static int
+vs_client_serial_serial_handle_msg(const struct vs_client_serial *_client,
+				   struct vs_client_serial_state *_state,
+				   struct vs_mbuf *_mbuf)
+{
+	const size_t _max_size =
+	    sizeof(vs_message_id_t) + _state->serial.packet_size + 4UL;
+	struct vs_pbuf b;
+	const size_t _min_size = _max_size - _state->serial.packet_size;
+	size_t _exact_size;
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(_state->state.base))
+		return -EPROTO;
+
+	/* The first check is to ensure the message isn't complete garbage */
+	if ((VS_MBUF_SIZE(_mbuf) > _max_size)
+	    || (VS_MBUF_SIZE(_mbuf) < _min_size))
+		return -EBADMSG;
+
+	b.size =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	b.data =
+	    (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL +
+			   sizeof(uint32_t));
+	b.max_size = b.size;
+
+	/* Now check the size received is the exact size expected */
+	_exact_size = _max_size - (_state->serial.packet_size - b.size);
+	if (VS_MBUF_SIZE(_mbuf) != _exact_size)
+		return -EBADMSG;
+	if (_client->serial.msg_msg)
+		return _client->serial.msg_msg(_state, b, _mbuf);
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_serial_serial_handle_msg);
+int
+vs_client_serial_serial_send_msg(struct vs_client_serial_state *_state,
+				 struct vs_pbuf b, struct vs_mbuf *_mbuf)
+{
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_client_serial *_client =
+	    to_client_driver(vsdrv)->client;
+	if (_state->state.base.statenum != VSERVICE_BASE_STATE_RUNNING)
+		return -EPROTO;
+	if (*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) !=
+	    VSERVICE_SERIAL_SERIAL_MSG_MSG)
+
+		return -EINVAL;
+
+	if ((b.size + sizeof(vs_message_id_t) + 0UL) > VS_MBUF_SIZE(_mbuf))
+		return -EINVAL;
+
+	if (b.size < b.max_size)
+		VS_MBUF_SIZE(_mbuf) -= (b.max_size - b.size);
+
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+	    b.size;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_serial_serial_send_msg);
+static int
+serial_handle_message(struct vs_service_device *service, struct vs_mbuf *_mbuf)
+{
+	vs_message_id_t message_id;
+	__maybe_unused struct vs_client_serial_state *state =
+	    dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	__maybe_unused struct vs_client_serial *client =
+	    to_client_driver(vsdrv)->client;
+
+	int ret;
+
+	/* Extract the message ID */
+	if (VS_MBUF_SIZE(_mbuf) < sizeof(message_id)) {
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Invalid message size %zd\n",
+			__func__, __LINE__, VS_MBUF_SIZE(_mbuf));
+
+		return -EBADMSG;
+	}
+
+	message_id = *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf));
+
+	switch (message_id) {
+
+/** interface base **/
+/* command in sync open */
+	case VSERVICE_SERIAL_BASE_ACK_OPEN:
+		ret = serial_base_handle_ack_open(client, state, _mbuf);
+		break;
+	case VSERVICE_SERIAL_BASE_NACK_OPEN:
+		ret = serial_base_handle_nack_open(client, state, _mbuf);
+		break;
+
+/* command in sync close */
+	case VSERVICE_SERIAL_BASE_ACK_CLOSE:
+		ret = serial_base_handle_ack_close(client, state, _mbuf);
+		break;
+	case VSERVICE_SERIAL_BASE_NACK_CLOSE:
+		ret = serial_base_handle_nack_close(client, state, _mbuf);
+		break;
+
+/* command in sync reopen */
+	case VSERVICE_SERIAL_BASE_ACK_REOPEN:
+		ret = serial_base_handle_ack_reopen(client, state, _mbuf);
+		break;
+	case VSERVICE_SERIAL_BASE_NACK_REOPEN:
+		ret = serial_base_handle_nack_reopen(client, state, _mbuf);
+		break;
+
+/** interface serial **/
+/* message msg */
+	case VSERVICE_SERIAL_SERIAL_MSG_MSG:
+		ret = vs_client_serial_serial_handle_msg(client, state, _mbuf);
+		break;
+
+	default:
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Unknown message type %d\n",
+			__func__, __LINE__, (int)message_id);
+
+		ret = -EPROTO;
+		break;
+	}
+
+	if (ret) {
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Handler for message type %d returned %d\n",
+			__func__, __LINE__, (int)message_id, ret);
+
+	}
+
+	return ret;
+}
+
+static void serial_handle_notify(struct vs_service_device *service,
+				 uint32_t notify_bits)
+{
+	__maybe_unused struct vs_client_serial_state *state =
+	    dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	__maybe_unused struct vs_client_serial *client =
+	    to_client_driver(vsdrv)->client;
+
+	uint32_t bits = notify_bits;
+	int ret;
+
+	while (bits) {
+		uint32_t not = __ffs(bits);
+		switch (not) {
+
+    /** interface serial **/
+
+		default:
+			dev_err(&state->service->dev,
+				"[%s:%d] Protocol error: Unknown notification %d\n",
+				__func__, __LINE__, (int)not);
+
+			ret = -EPROTO;
+			break;
+
+		}
+		bits &= ~(1 << not);
+		if (ret) {
+			dev_err(&state->service->dev,
+				"[%s:%d] Protocol error: Handler for notification %d returned %d\n",
+				__func__, __LINE__, (int)not, ret);
+
+		}
+	}
+}
+
+int vs_client_serial_reopen(struct vs_client_serial_state *_state)
+{
+	return _vs_client_serial_req_reopen(_state);
+}
+
+EXPORT_SYMBOL(vs_client_serial_reopen);
+
+int vs_client_serial_close(struct vs_client_serial_state *_state)
+{
+	return _vs_client_serial_req_close(_state);
+}
+
+EXPORT_SYMBOL(vs_client_serial_close);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services serial Client Protocol Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/protocol/serial/server.c b/drivers/vservices/protocol/serial/server.c
new file mode 100644
index 0000000..e5d1034
--- /dev/null
+++ b/drivers/vservices/protocol/serial/server.c
@@ -0,0 +1,1086 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+ /*
+  * This is the generated code for the serial server protocol handling.
+  */
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
+#include <linux/export.h>
+#endif
+
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/protocol/serial/types.h>
+#include <vservices/protocol/serial/common.h>
+#include <vservices/protocol/serial/server.h>
+#include <vservices/service.h>
+
+#include "../../transport.h"
+
+#define VS_MBUF_SIZE(mbuf) mbuf->size
+#define VS_MBUF_DATA(mbuf) mbuf->data
+#define VS_STATE_SERVICE_PTR(state) state->service
+
+/*** Linux driver model integration ***/
+struct vs_serial_server_driver {
+	struct vs_server_serial *server;
+	struct list_head list;
+	struct vs_service_driver vsdrv;
+};
+
+#define to_server_driver(d) \
+        container_of(d, struct vs_serial_server_driver, vsdrv)
+
+static void reset_nack_requests(struct vs_service_device *service)
+{
+
+}
+
+static void serial_handle_start(struct vs_service_device *service)
+{
+
+	struct vs_server_serial_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_serial *server __maybe_unused =
+	    to_server_driver(vsdrv)->server;
+
+	vs_service_state_lock(service);
+	state->state = VSERVICE_SERIAL_PROTOCOL_RESET_STATE;
+
+	vs_service_state_unlock(service);
+}
+
+static void serial_handle_reset(struct vs_service_device *service)
+{
+
+	struct vs_server_serial_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_serial *server __maybe_unused =
+	    to_server_driver(vsdrv)->server;
+
+	vs_service_state_lock(service);
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base)) {
+		vs_service_state_unlock(service);
+		return;
+	}
+	state->state.base = VSERVICE_BASE_RESET_STATE;
+	reset_nack_requests(service);
+	if (server->closed)
+		server->closed(state);
+
+	state->state = VSERVICE_SERIAL_PROTOCOL_RESET_STATE;
+
+	vs_service_state_unlock(service);
+}
+
+static void serial_handle_start_bh(struct vs_service_device *service)
+{
+
+	struct vs_server_serial_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_serial *server __maybe_unused =
+	    to_server_driver(vsdrv)->server;
+
+	vs_service_state_lock_bh(service);
+	state->state = VSERVICE_SERIAL_PROTOCOL_RESET_STATE;
+
+	vs_service_state_unlock_bh(service);
+}
+
+static void serial_handle_reset_bh(struct vs_service_device *service)
+{
+
+	struct vs_server_serial_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_serial *server __maybe_unused =
+	    to_server_driver(vsdrv)->server;
+
+	vs_service_state_lock_bh(service);
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base)) {
+		vs_service_state_unlock_bh(service);
+		return;
+	}
+	state->state.base = VSERVICE_BASE_RESET_STATE;
+	reset_nack_requests(service);
+	if (server->closed)
+		server->closed(state);
+
+	state->state = VSERVICE_SERIAL_PROTOCOL_RESET_STATE;
+
+	vs_service_state_unlock_bh(service);
+}
+
+static int serial_server_probe(struct vs_service_device *service);
+static int serial_server_remove(struct vs_service_device *service);
+static int serial_handle_message(struct vs_service_device *service,
+				 struct vs_mbuf *_mbuf);
+static void serial_handle_notify(struct vs_service_device *service,
+				 uint32_t flags);
+static void serial_handle_start(struct vs_service_device *service);
+static void serial_handle_start_bh(struct vs_service_device *service);
+static void serial_handle_reset(struct vs_service_device *service);
+static void serial_handle_reset_bh(struct vs_service_device *service);
+static int serial_handle_tx_ready(struct vs_service_device *service);
+
+int __vservice_serial_server_register(struct vs_server_serial *server,
+				      const char *name, struct module *owner)
+{
+	int ret;
+	struct vs_serial_server_driver *driver;
+
+	if (server->tx_atomic && !server->rx_atomic)
+		return -EINVAL;
+
+	driver = kzalloc(sizeof(*driver), GFP_KERNEL);
+	if (!driver) {
+		ret = -ENOMEM;
+		goto fail_alloc_driver;
+	}
+
+	server->driver = &driver->vsdrv;
+	driver->server = server;
+
+	driver->vsdrv.protocol = VSERVICE_SERIAL_PROTOCOL_NAME;
+
+	driver->vsdrv.is_server = true;
+	driver->vsdrv.rx_atomic = server->rx_atomic;
+	driver->vsdrv.tx_atomic = server->tx_atomic;
+	/* FIXME Jira ticket SDK-2835 - philipd. */
+	driver->vsdrv.in_quota_min = 1;
+	driver->vsdrv.in_quota_best = server->in_quota_best ?
+	    server->in_quota_best : driver->vsdrv.in_quota_min;
+	/* FIXME Jira ticket SDK-2835 - philipd. */
+	driver->vsdrv.out_quota_min = 1;
+	driver->vsdrv.out_quota_best = server->out_quota_best ?
+	    server->out_quota_best : driver->vsdrv.out_quota_min;
+	driver->vsdrv.in_notify_count = VSERVICE_SERIAL_NBIT_IN__COUNT;
+	driver->vsdrv.out_notify_count = VSERVICE_SERIAL_NBIT_OUT__COUNT;
+
+	driver->vsdrv.probe = serial_server_probe;
+	driver->vsdrv.remove = serial_server_remove;
+	driver->vsdrv.receive = serial_handle_message;
+	driver->vsdrv.notify = serial_handle_notify;
+	driver->vsdrv.start = server->tx_atomic ?
+	    serial_handle_start_bh : serial_handle_start;
+	driver->vsdrv.reset = server->tx_atomic ?
+	    serial_handle_reset_bh : serial_handle_reset;
+	driver->vsdrv.tx_ready = serial_handle_tx_ready;
+	driver->vsdrv.out_notify_count = 0;
+	driver->vsdrv.in_notify_count = 0;
+	driver->vsdrv.driver.name = name;
+	driver->vsdrv.driver.owner = owner;
+	driver->vsdrv.driver.bus = &vs_server_bus_type;
+
+	ret = driver_register(&driver->vsdrv.driver);
+
+	if (ret) {
+		goto fail_driver_register;
+	}
+
+	return 0;
+
+ fail_driver_register:
+	server->driver = NULL;
+	kfree(driver);
+ fail_alloc_driver:
+	return ret;
+}
+
+EXPORT_SYMBOL(__vservice_serial_server_register);
+
+int vservice_serial_server_unregister(struct vs_server_serial *server)
+{
+	struct vs_serial_server_driver *driver;
+
+	if (!server->driver)
+		return 0;
+
+	driver = to_server_driver(server->driver);
+	driver_unregister(&driver->vsdrv.driver);
+
+	server->driver = NULL;
+	kfree(driver);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vservice_serial_server_unregister);
+
+static int serial_server_probe(struct vs_service_device *service)
+{
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_serial *server = to_server_driver(vsdrv)->server;
+	struct vs_server_serial_state *state;
+
+	state = server->alloc(service);
+	if (!state)
+		return -ENOMEM;
+	else if (IS_ERR(state))
+		return PTR_ERR(state);
+
+	state->service = vs_get_service(service);
+	state->state = VSERVICE_SERIAL_PROTOCOL_RESET_STATE;
+
+	dev_set_drvdata(&service->dev, state);
+
+	return 0;
+}
+
+static int serial_server_remove(struct vs_service_device *service)
+{
+	struct vs_server_serial_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_serial *server = to_server_driver(vsdrv)->server;
+
+	state->released = true;
+	dev_set_drvdata(&service->dev, NULL);
+	server->release(state);
+
+	vs_put_service(service);
+
+	return 0;
+}
+
+static int serial_handle_tx_ready(struct vs_service_device *service)
+{
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_serial *server = to_server_driver(vsdrv)->server;
+	struct vs_server_serial_state *state = dev_get_drvdata(&service->dev);
+
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base))
+		return 0;
+
+	if (server->tx_ready)
+		server->tx_ready(state);
+
+	return 0;
+}
+
+static int
+vs_server_serial_send_ack_open(struct vs_server_serial_state *_state,
+			       gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 4UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_serial *_server =
+	    to_server_driver(vsdrv)->server;
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_SERIAL_BASE_ACK_OPEN;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_CLOSED__OPEN:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+	    _state->packet_size;
+	_state->serial.packet_size = _state->packet_size;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_serial_send_ack_open);
+static int
+vs_server_serial_send_nack_open(struct vs_server_serial_state *_state,
+				gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_serial *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_CLOSED__OPEN:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_SERIAL_BASE_NACK_OPEN;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_serial_send_nack_open);
+static int
+vs_server_serial_send_ack_close(struct vs_server_serial_state *_state,
+				gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_serial *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING__CLOSE:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_SERIAL_BASE_ACK_CLOSE;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_serial_send_ack_close);
+static int
+vs_server_serial_send_nack_close(struct vs_server_serial_state *_state,
+				 gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_serial *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING__CLOSE:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_SERIAL_BASE_NACK_CLOSE;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_serial_send_nack_close);
+static int
+vs_server_serial_send_ack_reopen(struct vs_server_serial_state *_state,
+				 gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_serial *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING__REOPEN:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_SERIAL_BASE_ACK_REOPEN;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.base.statenum = VSERVICE_BASE__RESET;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_serial_send_ack_reopen);
+static int
+vs_server_serial_send_nack_reopen(struct vs_server_serial_state *_state,
+				  gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_serial *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING__REOPEN:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_SERIAL_BASE_NACK_REOPEN;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_serial_send_nack_reopen);
+static int
+vs_server_serial_handle_req_open(const struct vs_server_serial *_server,
+				 struct vs_server_serial_state *_state,
+				 struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_CLOSED:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+	_state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED__OPEN;
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_server->open)
+		return vs_server_serial_open_complete(_state,
+						      _server->open(_state));
+	return vs_server_serial_open_complete(_state, VS_SERVER_RESP_SUCCESS);
+
+}
+
+int vs_server_serial_open_complete(struct vs_server_serial_state *_state,
+				   vs_server_response_type_t resp)
+{
+	int ret = 0;
+	if (resp == VS_SERVER_RESP_SUCCESS)
+		ret =
+		    vs_server_serial_send_ack_open(_state,
+						   vs_service_has_atomic_rx
+						   (VS_STATE_SERVICE_PTR
+						    (_state)) ? GFP_ATOMIC :
+						   GFP_KERNEL);
+	else if (resp == VS_SERVER_RESP_FAILURE)
+		ret =
+		    vs_server_serial_send_nack_open(_state,
+						    vs_service_has_atomic_rx
+						    (VS_STATE_SERVICE_PTR
+						     (_state)) ? GFP_ATOMIC :
+						    GFP_KERNEL);
+
+	return ret;
+
+}
+
+EXPORT_SYMBOL(vs_server_serial_open_complete);
+
+EXPORT_SYMBOL(vs_server_serial_handle_req_open);
+static int
+vs_server_serial_handle_req_close(const struct vs_server_serial *_server,
+				  struct vs_server_serial_state *_state,
+				  struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+	_state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING__CLOSE;
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_server->close)
+		return vs_server_serial_close_complete(_state,
+						       _server->close(_state));
+	return vs_server_serial_close_complete(_state, VS_SERVER_RESP_SUCCESS);
+
+}
+
+int vs_server_serial_close_complete(struct vs_server_serial_state *_state,
+				    vs_server_response_type_t resp)
+{
+	int ret = 0;
+	if (resp == VS_SERVER_RESP_SUCCESS)
+		ret =
+		    vs_server_serial_send_ack_close(_state,
+						    vs_service_has_atomic_rx
+						    (VS_STATE_SERVICE_PTR
+						     (_state)) ? GFP_ATOMIC :
+						    GFP_KERNEL);
+	else if (resp == VS_SERVER_RESP_FAILURE)
+		ret =
+		    vs_server_serial_send_nack_close(_state,
+						     vs_service_has_atomic_rx
+						     (VS_STATE_SERVICE_PTR
+						      (_state)) ? GFP_ATOMIC :
+						     GFP_KERNEL);
+	if ((resp == VS_SERVER_RESP_SUCCESS) && (ret == 0)) {
+		wake_up_all(&_state->service->quota_wq);
+	}
+	return ret;
+
+}
+
+EXPORT_SYMBOL(vs_server_serial_close_complete);
+
+EXPORT_SYMBOL(vs_server_serial_handle_req_close);
+static int
+vs_server_serial_handle_req_reopen(const struct vs_server_serial *_server,
+				   struct vs_server_serial_state *_state,
+				   struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+	_state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING__REOPEN;
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_server->reopen)
+		return vs_server_serial_reopen_complete(_state,
+							_server->
+							reopen(_state));
+	else
+		return vs_server_serial_send_nack_reopen(_state,
+							 vs_service_has_atomic_rx
+							 (VS_STATE_SERVICE_PTR
+							  (_state)) ? GFP_ATOMIC
+							 : GFP_KERNEL);
+
+}
+
+int vs_server_serial_reopen_complete(struct vs_server_serial_state *_state,
+				     vs_server_response_type_t resp)
+{
+	int ret = 0;
+	if (resp == VS_SERVER_RESP_SUCCESS) {
+		ret =
+		    vs_server_serial_send_ack_reopen(_state,
+						     vs_service_has_atomic_rx
+						     (VS_STATE_SERVICE_PTR
+						      (_state)) ? GFP_ATOMIC :
+						     GFP_KERNEL);
+	} else if (resp == VS_SERVER_RESP_FAILURE) {
+		ret =
+		    vs_server_serial_send_nack_reopen(_state,
+						      vs_service_has_atomic_rx
+						      (VS_STATE_SERVICE_PTR
+						       (_state)) ? GFP_ATOMIC :
+						      GFP_KERNEL);
+	}
+
+	return ret;
+
+}
+
+EXPORT_SYMBOL(vs_server_serial_reopen_complete);
+
+EXPORT_SYMBOL(vs_server_serial_handle_req_reopen);
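+/*
+ * Layout used by the serial "msg" helpers below: a vs_message_id_t
+ * header, followed by a u32 payload length, followed by up to
+ * serial.packet_size bytes of payload. The mbuf is allocated at the
+ * maximum size and trimmed at send time if the payload is shorter.
+ */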
+struct vs_mbuf *vs_server_serial_serial_alloc_msg(struct vs_server_serial_state
+						  *_state, struct vs_pbuf *b,
+						  gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+	const vs_message_id_t _msg_id = VSERVICE_SERIAL_SERIAL_MSG_MSG;
+	const uint32_t _msg_size =
+	    sizeof(vs_message_id_t) + _state->serial.packet_size + 4UL;
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return _mbuf;
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+		return ERR_PTR(-ENOMEM);
+	}
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) = _msg_id;
+
+	if (!b)
+		goto fail;
+	b->data =
+	    (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL +
+			   sizeof(uint32_t));
+	b->size = _state->serial.packet_size;
+	b->max_size = b->size;
+	return _mbuf;
+
+ fail:
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	return NULL;
+}
+
+EXPORT_SYMBOL(vs_server_serial_serial_alloc_msg);
+int vs_server_serial_serial_getbufs_msg(struct vs_server_serial_state *_state,
+					struct vs_pbuf *b,
+					struct vs_mbuf *_mbuf)
+{
+	const vs_message_id_t _msg_id = VSERVICE_SERIAL_SERIAL_MSG_MSG;
+	const size_t _max_size =
+	    sizeof(vs_message_id_t) + _state->serial.packet_size + 4UL;
+	const size_t _min_size = _max_size - _state->serial.packet_size;
+	size_t _exact_size;
+
+	if (*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) != _msg_id)
+		return -EINVAL;
+	if ((VS_MBUF_SIZE(_mbuf) > _max_size)
+	    || (VS_MBUF_SIZE(_mbuf) < _min_size))
+		return -EBADMSG;
+
+	b->size =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	b->data =
+	    (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL +
+			   sizeof(uint32_t));
+	b->max_size = b->size;
+
+	/* Now check the size received is the exact size expected */
+	_exact_size = _max_size - (_state->serial.packet_size - b->size);
+	if (VS_MBUF_SIZE(_mbuf) != _exact_size)
+		return -EBADMSG;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_serial_serial_getbufs_msg);
+int vs_server_serial_serial_free_msg(struct vs_server_serial_state *_state,
+				     struct vs_pbuf *b, struct vs_mbuf *_mbuf)
+{
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_serial_serial_free_msg);
+int
+vs_server_serial_serial_send_msg(struct vs_server_serial_state *_state,
+				 struct vs_pbuf b, struct vs_mbuf *_mbuf)
+{
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_serial *_server =
+	    to_server_driver(vsdrv)->server;
+	if (_state->state.base.statenum != VSERVICE_BASE_STATE_RUNNING)
+		return -EPROTO;
+	if (*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) !=
+	    VSERVICE_SERIAL_SERIAL_MSG_MSG)
+
+		return -EINVAL;
+
+	if ((b.size + sizeof(vs_message_id_t) + 0UL) > VS_MBUF_SIZE(_mbuf))
+		return -EINVAL;
+
+	if (b.size < b.max_size)
+		VS_MBUF_SIZE(_mbuf) -= (b.max_size - b.size);
+
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+	    b.size;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_serial_serial_send_msg);
+static int
+vs_server_serial_serial_handle_msg(const struct vs_server_serial *_server,
+				   struct vs_server_serial_state *_state,
+				   struct vs_mbuf *_mbuf)
+{
+	const size_t _max_size =
+	    sizeof(vs_message_id_t) + _state->serial.packet_size + 4UL;
+	struct vs_pbuf b;
+	const size_t _min_size = _max_size - _state->serial.packet_size;
+	size_t _exact_size;
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(_state->state.base))
+		return -EPROTO;
+
+	/* The first check is to ensure the message isn't complete garbage */
+	if ((VS_MBUF_SIZE(_mbuf) > _max_size)
+	    || (VS_MBUF_SIZE(_mbuf) < _min_size))
+		return -EBADMSG;
+
+	b.size =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	b.data =
+	    (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL +
+			   sizeof(uint32_t));
+	b.max_size = b.size;
+
+	/* Now check the size received is the exact size expected */
+	_exact_size = _max_size - (_state->serial.packet_size - b.size);
+	if (VS_MBUF_SIZE(_mbuf) != _exact_size)
+		return -EBADMSG;
+	if (_server->serial.msg_msg)
+		return _server->serial.msg_msg(_state, b, _mbuf);
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_serial_serial_handle_msg);
+static int
+serial_handle_message(struct vs_service_device *service, struct vs_mbuf *_mbuf)
+{
+	vs_message_id_t message_id;
+	__maybe_unused struct vs_server_serial_state *state =
+	    dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	__maybe_unused struct vs_server_serial *server =
+	    to_server_driver(vsdrv)->server;
+
+	int ret;
+
+	/* Extract the message ID */
+	if (VS_MBUF_SIZE(_mbuf) < sizeof(message_id)) {
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Invalid message size %zd\n",
+			__func__, __LINE__, VS_MBUF_SIZE(_mbuf));
+
+		return -EBADMSG;
+	}
+
+	message_id = *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf));
+
+	switch (message_id) {
+
+/** interface base **/
+/* command in sync open */
+	case VSERVICE_SERIAL_BASE_REQ_OPEN:
+		ret = vs_server_serial_handle_req_open(server, state, _mbuf);
+		break;
+
+/* command in sync close */
+	case VSERVICE_SERIAL_BASE_REQ_CLOSE:
+		ret = vs_server_serial_handle_req_close(server, state, _mbuf);
+		break;
+
+/* command in sync reopen */
+	case VSERVICE_SERIAL_BASE_REQ_REOPEN:
+		ret = vs_server_serial_handle_req_reopen(server, state, _mbuf);
+		break;
+
+/** interface serial **/
+/* message msg */
+	case VSERVICE_SERIAL_SERIAL_MSG_MSG:
+		ret = vs_server_serial_serial_handle_msg(server, state, _mbuf);
+		break;
+
+	default:
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Unknown message type %d\n",
+			__func__, __LINE__, (int)message_id);
+
+		ret = -EPROTO;
+		break;
+	}
+
+	if (ret) {
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Handler for message type %d returned %d\n",
+			__func__, __LINE__, (int)message_id, ret);
+
+	}
+
+	return ret;
+}
+
+static void serial_handle_notify(struct vs_service_device *service,
+				 uint32_t notify_bits)
+{
+	__maybe_unused struct vs_server_serial_state *state =
+	    dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	__maybe_unused struct vs_server_serial *server =
+	    to_server_driver(vsdrv)->server;
+
+	uint32_t bits = notify_bits;
+	int ret;
+
+	while (bits) {
+		uint32_t not = __ffs(bits);
+		switch (not) {
+
+    /** interface serial **/
+
+		default:
+			dev_err(&state->service->dev,
+				"[%s:%d] Protocol error: Unknown notification %d\n",
+				__func__, __LINE__, (int)not);
+
+			ret = -EPROTO;
+			break;
+
+		}
+		bits &= ~(1 << not);
+		if (ret) {
+			dev_err(&state->service->dev,
+				"[%s:%d] Protocol error: Handler for notification %d returned %d\n",
+				__func__, __LINE__, (int)not, ret);
+
+		}
+	}
+}
+
+MODULE_DESCRIPTION("OKL4 Virtual Services serialServer Protocol Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/session.c b/drivers/vservices/session.c
new file mode 100644
index 0000000..d695184
--- /dev/null
+++ b/drivers/vservices/session.c
@@ -0,0 +1,2913 @@
+/*
+ * drivers/vservices/session.c
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This is the generic session-management code for the vServices framework.
+ * It creates service and session devices on request from session and
+ * transport drivers, respectively; it also queues incoming messages from the
+ * transport and distributes them to the session's services.
+ */
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/notifier.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/kdev_t.h>
+#include <linux/err.h>
+
+#include <vservices/transport.h>
+#include <vservices/session.h>
+#include <vservices/service.h>
+
+#include "session.h"
+#include "transport.h"
+#include "compat.h"
+
+/* Minimum required time between resets to avoid throttling */
+#define RESET_THROTTLE_TIME msecs_to_jiffies(1000)
+
+/*
+ * Minimum/maximum reset throttling time. The reset throttle will start at
+ * the minimum and increase to the maximum exponentially.
+ */
+#define RESET_THROTTLE_MIN RESET_THROTTLE_TIME
+#define RESET_THROTTLE_MAX msecs_to_jiffies(8 * 1000)
+
+/*
+ * If the reset is being throttled and a sane reset (one that doesn't need
+ * throttling) is requested, throttling is disabled once the service's reset
+ * delay multiplied by this value has elapsed.
+ */
+#define RESET_THROTTLE_COOL_OFF_MULT 2
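+
+/*
+ * Illustrative example (assuming the delay doubles on each throttled
+ * reset): successive throttled resets are delayed by roughly 1s, 2s,
+ * 4s and then capped at 8s. Once no reset has been requested for
+ * RESET_THROTTLE_COOL_OFF_MULT times the current delay, the throttle
+ * is considered cooled off and the delay is cleared.
+ */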
+
+/* IDR of session ids to sessions */
+static DEFINE_IDR(session_idr);
+DEFINE_MUTEX(vs_session_lock);
+EXPORT_SYMBOL_GPL(vs_session_lock);
+
+/* Notifier list for vService session events */
+static BLOCKING_NOTIFIER_HEAD(vs_session_notifier_list);
+
+static unsigned long default_debug_mask;
+module_param(default_debug_mask, ulong, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(default_debug_mask, "Default vServices debug mask");
+
+/* vServices root in sysfs at /sys/vservices */
+struct kobject *vservices_root;
+EXPORT_SYMBOL_GPL(vservices_root);
+
+/* vServices server root in sysfs at /sys/vservices/server-sessions */
+struct kobject *vservices_server_root;
+EXPORT_SYMBOL_GPL(vservices_server_root);
+
+/* vServices client root in sysfs at /sys/vservices/client-sessions */
+struct kobject *vservices_client_root;
+EXPORT_SYMBOL_GPL(vservices_client_root);
+
+#ifdef CONFIG_VSERVICES_CHAR_DEV
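+/*
+ * The character device minor number encodes both ends of the lookup:
+ * minor / VS_MAX_SERVICES selects the session, and
+ * minor % VS_MAX_SERVICES selects the service within that session.
+ */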
+struct vs_service_device *vs_service_lookup_by_devt(dev_t dev)
+{
+	struct vs_session_device *session;
+	struct vs_service_device *service;
+
+	mutex_lock(&vs_session_lock);
+	session = idr_find(&session_idr, MINOR(dev) / VS_MAX_SERVICES);
+	if (!session) {
+		mutex_unlock(&vs_session_lock);
+		return NULL;
+	}
+	get_device(&session->dev);
+	mutex_unlock(&vs_session_lock);
+
+	service = vs_session_get_service(session,
+			MINOR(dev) % VS_MAX_SERVICES);
+	put_device(&session->dev);
+
+	return service;
+}
+#endif
+
+struct vs_session_for_each_data {
+	int (*fn)(struct vs_session_device *session, void *data);
+	void *data;
+};
+
+int vs_session_for_each_from_idr(int id, void *session, void *_data)
+{
+	struct vs_session_for_each_data *data =
+		(struct vs_session_for_each_data *)_data;
+	return data->fn(session, data->data);
+}
+
+/**
+ * vs_session_for_each_locked - call a callback function for each session
+ * @fn: function to call
+ * @data: opaque pointer that is passed through to the function
+ */
+extern int vs_session_for_each_locked(
+		int (*fn)(struct vs_session_device *session, void *data),
+		void *data)
+{
+	struct vs_session_for_each_data priv = { .fn = fn, .data = data };
+
+	lockdep_assert_held(&vs_session_lock);
+
+	return idr_for_each(&session_idr, vs_session_for_each_from_idr,
+			&priv);
+}
+EXPORT_SYMBOL(vs_session_for_each_locked);
+
+/**
+ * vs_session_register_notify - register a notifier callback for vServices events
+ * @nb: pointer to the notifier block for the callback events.
+ */
+void vs_session_register_notify(struct notifier_block *nb)
+{
+	blocking_notifier_chain_register(&vs_session_notifier_list, nb);
+}
+EXPORT_SYMBOL(vs_session_register_notify);
+
+/**
+ * vs_session_unregister_notify - unregister a notifier callback for vServices events
+ * @nb: pointer to the notifier block for the callback events.
+ */
+void vs_session_unregister_notify(struct notifier_block *nb)
+{
+	blocking_notifier_chain_unregister(&vs_session_notifier_list, nb);
+}
+EXPORT_SYMBOL(vs_session_unregister_notify);
+
+/*
+ * Helper function for reporting how long ago something happened, in
+ * milliseconds. Marked as __maybe_unused since it is only called when
+ * CONFIG_VSERVICES_DEBUG is enabled; it cannot simply be removed because
+ * that would cause compile-time errors for debug builds.
+ */
+static __maybe_unused unsigned msecs_ago(unsigned long jiffy_value)
+{
+	return jiffies_to_msecs(jiffies - jiffy_value);
+}
+
+static void session_fatal_error_work(struct work_struct *work)
+{
+	struct vs_session_device *session = container_of(work,
+			struct vs_session_device, fatal_error_work);
+
+	session->transport->vt->reset(session->transport);
+}
+
+static void session_fatal_error(struct vs_session_device *session, gfp_t gfp)
+{
+	schedule_work(&session->fatal_error_work);
+}
+
+/*
+ * Service readiness state machine
+ *
+ * The states are:
+ *
+ * INIT: Initial state. Service may not be completely configured yet
+ * (typically because the protocol hasn't been set); call vs_service_start
+ * once configuration is complete. The disable count must be nonzero, and
+ * must never reach zero in this state.
+ * DISABLED: Service is not permitted to communicate. Non-core services are
+ * in this state whenever the core protocol and/or transport state does not
+ * allow them to be active; core services are only in this state transiently.
+ * The disable count must be nonzero; when it reaches zero, the service
+ * transitions to RESET state.
+ * RESET: Service drivers are inactive at both ends, but the core service
+ * state allows the service to become active. The session will schedule a
+ * future transition to READY state when entering this state, but the
+ * transition may be delayed to throttle the rate at which resets occur.
+ * READY: All core-service and session-layer policy allows the service to
+ * communicate; it will become active as soon as it has a protocol driver.
+ * ACTIVE: The driver is present and communicating.
+ * LOCAL_RESET: We have initiated a reset at this end, but the remote end has
+ * not yet acknowledged it. We will enter the RESET state on receiving
+ * acknowledgement, unless the disable count is nonzero in which case we
+ * will enter DISABLED state.
+ * LOCAL_DELETE: As for LOCAL_RESET, but we will enter the DELETED state
+ * instead of RESET or DISABLED.
+ * DELETED: The service is no longer present on the session; the service
+ * device structure may still exist because something is holding a reference
+ * to it.
+ *
+ * The permitted transitions are:
+ *
+ * From          To            Trigger
+ * INIT          DISABLED      vs_service_start
+ * DISABLED      RESET         vs_service_enable (disable_count -> 0)
+ * RESET         READY         End of throttle delay (may be 0)
+ * READY         ACTIVE        Latter of probe() and entering READY
+ * {READY, ACTIVE}
+ *               LOCAL_RESET   vs_service_reset
+ * {READY, ACTIVE, LOCAL_RESET}
+ *               RESET         vs_service_handle_reset (server)
+ * RESET         DISABLED      vs_service_disable (server)
+ * {READY, ACTIVE, LOCAL_RESET}
+ *               DISABLED      vs_service_handle_reset (client)
+ * {INIT, RESET, READY, ACTIVE, LOCAL_RESET}
+ *               DISABLED      vs_service_disable_noncore
+ * {ACTIVE, LOCAL_RESET}
+ *               LOCAL_DELETE  vs_service_delete
+ * {INIT, DISABLED, RESET, READY}
+ *               DELETED       vs_service_delete
+ * LOCAL_DELETE  DELETED       vs_service_handle_reset
+ *                             vs_service_disable_noncore
+ *
+ * See the documentation for the triggers for details.
+ */
+
+enum vs_service_readiness {
+	VS_SERVICE_INIT,
+	VS_SERVICE_DISABLED,
+	VS_SERVICE_RESET,
+	VS_SERVICE_READY,
+	VS_SERVICE_ACTIVE,
+	VS_SERVICE_LOCAL_RESET,
+	VS_SERVICE_LOCAL_DELETE,
+	VS_SERVICE_DELETED,
+};
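+
+/*
+ * Illustrative lifecycle sketch (for orientation only): a newly
+ * registered and configured service typically goes through
+ *
+ *	vs_service_start(service);	// INIT -> DISABLED
+ *	vs_service_enable(service);	// DISABLED -> RESET (disable_count hits 0)
+ *
+ * after which the session layer moves it RESET -> READY once any reset
+ * throttling delay has elapsed, and READY -> ACTIVE once a protocol
+ * driver has probed.
+ */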
+
+/* Session activation states. */
+enum {
+	VS_SESSION_RESET,
+	VS_SESSION_ACTIVATE,
+	VS_SESSION_ACTIVE,
+};
+
+/**
+ * vs_service_start - Start a service by moving it from the init state to the
+ * disabled state.
+ *
+ * @service: The service to start.
+ *
+ * Returns true if the service was started, or false if it was not.
+ */
+bool vs_service_start(struct vs_service_device *service)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+	struct vs_session_driver *session_drv =
+			to_vs_session_driver(session->dev.driver);
+
+	WARN_ON(!service->protocol);
+
+	mutex_lock_nested(&service->ready_lock, service->lock_subclass);
+
+	if (service->readiness != VS_SERVICE_INIT) {
+		if (service->readiness != VS_SERVICE_DELETED)
+			dev_err(&service->dev,
+					"start called from invalid state %d\n",
+					service->readiness);
+		mutex_unlock(&service->ready_lock);
+		return false;
+	}
+
+	if (service->id != 0 && session_drv->service_added) {
+		int err = session_drv->service_added(session, service);
+		if (err < 0) {
+			dev_err(&session->dev, "Failed to add service %d: %d\n",
+					service->id, err);
+			mutex_unlock(&service->ready_lock);
+			return false;
+		}
+	}
+
+	service->readiness = VS_SERVICE_DISABLED;
+	service->disable_count = 1;
+	service->last_reset_request = jiffies;
+
+	mutex_unlock(&service->ready_lock);
+
+	/* Tell userspace about the service. */
+	dev_set_uevent_suppress(&service->dev, false);
+	kobject_uevent(&service->dev.kobj, KOBJ_ADD);
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(vs_service_start);
+
+static void cancel_pending_rx(struct vs_service_device *service);
+static void queue_ready_work(struct vs_service_device *service);
+
+static void __try_start_service(struct vs_service_device *service)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+	struct vs_session_driver *session_drv =
+			to_vs_session_driver(session->dev.driver);
+	struct vs_transport *transport;
+	int err;
+	struct vs_service_driver *driver;
+
+	lockdep_assert_held(&service->ready_lock);
+
+	/* We can't start if the service is not ready yet. */
+	if (service->readiness != VS_SERVICE_READY)
+		return;
+
+	/*
+	 * There should never be anything in the RX queue at this point.
+	 * If there is, it can seriously confuse the service drivers for
+	 * no obvious reason, so we check.
+	 */
+	if (WARN_ON(!list_empty(&service->rx_queue)))
+		cancel_pending_rx(service);
+
+	if (!service->driver_probed) {
+		vs_dev_debug(VS_DEBUG_SESSION, session, &service->dev,
+				"ready with no driver\n");
+		return;
+	}
+
+	/* Prepare the transport to support the service. */
+	transport = session->transport;
+	err = transport->vt->service_start(transport, service);
+
+	if (err < 0) {
+		/* fatal error attempting to start; reset and try again */
+		service->readiness = VS_SERVICE_RESET;
+		service->last_reset_request = jiffies;
+		service->last_reset = jiffies;
+		queue_ready_work(service);
+
+		return;
+	}
+
+	service->readiness = VS_SERVICE_ACTIVE;
+
+	driver = to_vs_service_driver(service->dev.driver);
+	if (driver->start)
+		driver->start(service);
+
+	if (service->id && session_drv->service_start) {
+		err = session_drv->service_start(session, service);
+		if (err < 0) {
+			dev_err(&session->dev, "Failed to start service %s (%d): %d\n",
+					dev_name(&service->dev),
+					service->id, err);
+			session_fatal_error(session, GFP_KERNEL);
+		}
+	}
+}
+
+static void try_start_service(struct vs_service_device *service)
+{
+	mutex_lock_nested(&service->ready_lock, service->lock_subclass);
+
+	__try_start_service(service);
+
+	mutex_unlock(&service->ready_lock);
+}
+
+static void service_ready_work(struct work_struct *work)
+{
+	struct vs_service_device *service = container_of(work,
+			struct vs_service_device, ready_work.work);
+	struct vs_session_device *session = vs_service_get_session(service);
+
+	vs_dev_debug(VS_DEBUG_SESSION, session, &service->dev,
+			"ready work - last reset request was %u ms ago\n",
+			msecs_ago(service->last_reset_request));
+
+	/*
+	 * Make sure there's no reset work pending from an earlier driver
+	 * failure. We should already be inactive at this point, so it's safe
+	 * to just cancel it.
+	 */
+	cancel_work_sync(&service->reset_work);
+
+	mutex_lock_nested(&service->ready_lock, service->lock_subclass);
+
+	if (service->readiness != VS_SERVICE_RESET) {
+		vs_dev_debug(VS_DEBUG_SESSION, session, &service->dev,
+				"ready work found readiness of %d, doing nothing\n",
+				service->readiness);
+		mutex_unlock(&service->ready_lock);
+		return;
+	}
+
+	service->readiness = VS_SERVICE_READY;
+	/* Record the time at which this happened, for throttling. */
+	service->last_ready = jiffies;
+
+	/* Tell userspace that the service is ready. */
+	kobject_uevent(&service->dev.kobj, KOBJ_ONLINE);
+
+	/* Start the service, if it has a driver attached. */
+	__try_start_service(service);
+
+	mutex_unlock(&service->ready_lock);
+}
+
+static int __enable_service(struct vs_service_device *service);
+
+/**
+ * __reset_service - make a service inactive, and tell its driver, the
+ * transport, and possibly the remote partner
+ * @service:       The service to reset
+ * @notify_remote: If true, the partner is notified of the reset
+ *
+ * This routine is called to make an active service inactive. If the given
+ * service is currently active, it drops any queued messages for the service,
+ * and then informs the service driver and the transport layer that the
+ * service has reset. It sets the service readiness to VS_SERVICE_LOCAL_RESET
+ * to indicate that the driver is no longer active.
+ *
+ * This routine has no effect on services that are not active.
+ *
+ * The caller must hold the target service's ready lock.
+ */
+static void __reset_service(struct vs_service_device *service,
+		bool notify_remote)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+	struct vs_session_driver *session_drv =
+			to_vs_session_driver(session->dev.driver);
+	struct vs_service_driver *driver = NULL;
+	struct vs_transport *transport;
+	int err;
+
+	lockdep_assert_held(&service->ready_lock);
+
+	/* If we're already inactive, there's nothing to do. */
+	if (service->readiness != VS_SERVICE_ACTIVE)
+		return;
+
+	service->last_reset = jiffies;
+	service->readiness = VS_SERVICE_LOCAL_RESET;
+
+	cancel_pending_rx(service);
+
+	if (!WARN_ON(!service->driver_probed))
+		driver = to_vs_service_driver(service->dev.driver);
+
+	if (driver && driver->reset)
+		driver->reset(service);
+
+	wake_up_all(&service->quota_wq);
+
+	transport = vs_service_get_session(service)->transport;
+
+	/*
+	 * Ask the transport to reset the service. If this returns a positive
+	 * value, we need to leave the service disabled, and the transport
+	 * will re-enable it. To avoid allowing the disable count to go
+	 * negative if that re-enable races with this callback returning, we
+	 * disable the service beforehand and re-enable it if the callback
+	 * returns zero.
+	 */
+	service->disable_count++;
+	err = transport->vt->service_reset(transport, service);
+	if (err < 0) {
+		dev_err(&session->dev, "Failed to reset service %d: %d (transport)\n",
+				service->id, err);
+		session_fatal_error(session, GFP_KERNEL);
+	} else if (!err) {
+		err = __enable_service(service);
+	}
+
+	if (notify_remote) {
+		if (service->id) {
+			err = session_drv->service_local_reset(session,
+					service);
+			if (err == VS_SERVICE_ALREADY_RESET) {
+				service->readiness = VS_SERVICE_RESET;
+				service->last_reset = jiffies;
+				queue_ready_work(service);
+
+			} else if (err < 0) {
+				dev_err(&session->dev, "Failed to reset service %d: %d (session)\n",
+						service->id, err);
+				session_fatal_error(session, GFP_KERNEL);
+			}
+		} else {
+			session->transport->vt->reset(session->transport);
+		}
+	}
+
+	/* Tell userspace that the service is no longer active. */
+	kobject_uevent(&service->dev.kobj, KOBJ_OFFLINE);
+}
+
+/**
+ * reset_service - reset a service and inform the remote partner
+ * @service: The service to reset
+ *
+ * This routine is called when a reset is locally initiated (other than
+ * implicitly by a session / core service reset). It bumps the reset request
+ * timestamp, acquires the necessary locks, and calls __reset_service.
+ *
+ * This routine returns with the service ready lock held, to allow the caller
+ * to make any other state changes that must be atomic with the service
+ * reset.
+ */
+static void reset_service(struct vs_service_device *service)
+	__acquires(service->ready_lock)
+{
+	service->last_reset_request = jiffies;
+
+	mutex_lock_nested(&service->ready_lock, service->lock_subclass);
+
+	__reset_service(service, true);
+}
+
+/**
+ * vs_service_reset - initiate a service reset
+ * @service: the service that is to be reset
+ * @caller: the service that is initiating the reset
+ *
+ * This routine informs the partner that the given service is being reset,
+ * then disables and flushes the service's receive queues and resets its
+ * driver. The service will be automatically re-enabled once the partner has
+ * acknowledged the reset (see vs_session_handle_service_reset, above).
+ *
+ * If the given service is the core service, this will perform a transport
+ * reset, which implicitly resets (on the server side) or destroys (on
+ * the client side) every other service on the session.
+ *
+ * If the given service is already being reset, this has no effect, other
+ * than to delay completion of the reset if it is being throttled.
+ *
+ * For lock safety reasons, a service can only be directly reset by itself,
+ * the core service, or the service that created it (which is typically also
+ * the core service).
+ *
+ * A service that wishes to reset itself must not do so while holding its state
+ * lock or while running on its own workqueue. In these circumstances, call
+ * vs_service_reset_nosync() instead. Note that returning an error code
+ * (any negative number) from a driver callback forces a call to
+ * vs_service_reset_nosync() and prints an error message.
+ */
+int vs_service_reset(struct vs_service_device *service,
+		struct vs_service_device *caller)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+
+	if (caller != service && caller != service->owner) {
+		struct vs_service_device *core_service = session->core_service;
+
+		WARN_ON(!core_service);
+		if (caller != core_service)
+			return -EPERM;
+	}
+
+	reset_service(service);
+	/* reset_service returns with ready_lock held, but we don't need it */
+	mutex_unlock(&service->ready_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vs_service_reset);
+
+/**
+ * vs_service_reset_nosync - asynchronously reset a service.
+ * @service: the service that is to be reset
+ *
+ * This routine triggers a reset for the nominated service. It may be called
+ * from any context, including interrupt context. It does not wait for the
+ * reset to occur, and provides no synchronisation guarantees when called from
+ * outside the target service.
+ *
+ * This is intended only for service drivers that need to reset themselves
+ * from a context that would not normally allow it. In other cases, use
+ * vs_service_reset.
+ */
+void vs_service_reset_nosync(struct vs_service_device *service)
+{
+	service->pending_reset = true;
+	schedule_work(&service->reset_work);
+}
+EXPORT_SYMBOL_GPL(vs_service_reset_nosync);
+
+static void
+vs_service_remove_sysfs_entries(struct vs_session_device *session,
+		struct vs_service_device *service)
+{
+	sysfs_remove_link(session->sysfs_entry, service->sysfs_name);
+	sysfs_remove_link(&service->dev.kobj, VS_SESSION_SYMLINK_NAME);
+}
+
+static void vs_session_release_service_id(struct vs_service_device *service)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+
+	mutex_lock(&session->service_idr_lock);
+	idr_remove(&session->service_idr, service->id);
+	mutex_unlock(&session->service_idr_lock);
+	vs_dev_debug(VS_DEBUG_SESSION, session, &service->dev,
+			"service id deallocated\n");
+}
+
+static void destroy_service(struct vs_service_device *service,
+		bool notify_remote)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+	struct vs_session_driver *session_drv =
+			to_vs_session_driver(session->dev.driver);
+	struct vs_service_device *core_service __maybe_unused =
+			session->core_service;
+	int err;
+
+	lockdep_assert_held(&service->ready_lock);
+	WARN_ON(service->readiness != VS_SERVICE_DELETED);
+
+	/* Notify the core service and transport that the service is gone */
+	session->transport->vt->service_remove(session->transport, service);
+	if (notify_remote && service->id && session_drv->service_removed) {
+		err = session_drv->service_removed(session, service);
+		if (err < 0) {
+			dev_err(&session->dev,
+					"Failed to remove service %d: %d\n",
+					service->id, err);
+			session_fatal_error(session, GFP_KERNEL);
+		}
+	}
+
+	/*
+	 * At this point the service is guaranteed to be gone on the client
+	 * side, so we can safely release the service ID.
+	 */
+	if (session->is_server)
+		vs_session_release_service_id(service);
+
+	/*
+	 * This guarantees that any concurrent vs_session_get_service() that
+	 * found the service before we removed it from the IDR will take a
+	 * reference before we release ours.
+	 *
+	 * This similarly protects for_each_[usable_]service().
+	 */
+	synchronize_rcu();
+
+	/* Matches device_initialize() in vs_service_register() */
+	put_device(&service->dev);
+}
+
+/**
+ * disable_service - prevent a service becoming ready
+ * @service: the service that is to be disabled
+ * @force: true if the service is known to be in reset
+ *
+ * This routine may be called for any inactive service. Once disabled, the
+ * service cannot be made ready by the session, and thus cannot become active,
+ * until vs_service_enable() is called for it. If multiple calls are made to
+ * this function, they must be balanced by vs_service_enable() calls.
+ *
+ * If the force option is true, then any pending unacknowledged reset will be
+ * presumed to have been acknowledged. This is used when the core service is
+ * entering reset.
+ *
+ * This is used by the core service client to prevent the service restarting
+ * until the server is ready (i.e., a server_ready message is received); by
+ * the session layer to stop all communication while the core service itself
+ * is in reset; and by the transport layer when the transport was unable to
+ * complete reset of a service in its reset callback (typically because
+ * a service had passed message buffers to another Linux subsystem and could
+ * not free them immediately).
+ *
+ * In any case, there is no need for the operation to be signalled in any
+ * way, because the service is already in reset. It simply delays future
+ * signalling of service readiness.
+ */
+static void disable_service(struct vs_service_device *service, bool force)
+{
+	lockdep_assert_held(&service->ready_lock);
+
+	switch (service->readiness) {
+	case VS_SERVICE_INIT:
+	case VS_SERVICE_DELETED:
+	case VS_SERVICE_LOCAL_DELETE:
+		dev_err(&service->dev, "disabled while uninitialised\n");
+		break;
+	case VS_SERVICE_ACTIVE:
+		dev_err(&service->dev, "disabled while active\n");
+		break;
+	case VS_SERVICE_LOCAL_RESET:
+		/*
+		 * Will go to DISABLED state when reset completes, unless
+		 * it's being forced (i.e. we're moving to a core protocol
+		 * state that implies everything else is reset).
+		 */
+		if (force)
+			service->readiness = VS_SERVICE_DISABLED;
+		service->disable_count++;
+		break;
+	default:
+		service->readiness = VS_SERVICE_DISABLED;
+		service->disable_count++;
+		break;
+	}
+
+	cancel_delayed_work(&service->ready_work);
+}
+
+static int service_handle_reset(struct vs_session_device *session,
+		struct vs_service_device *target, bool disable)
+{
+	struct vs_session_driver *session_drv =
+			to_vs_session_driver(session->dev.driver);
+	int err = 0;
+
+	mutex_lock_nested(&target->ready_lock, target->lock_subclass);
+
+	switch (target->readiness) {
+	case VS_SERVICE_LOCAL_DELETE:
+		target->readiness = VS_SERVICE_DELETED;
+		destroy_service(target, true);
+		break;
+	case VS_SERVICE_ACTIVE:
+		/*
+		 * Reset the service and send a reset notification.
+		 *
+		 * We only send notifications for non-core services. This is
+		 * because core notifies by sending a transport reset, which
+		 * is what brought us here in the first place. Note that we
+		 * must already hold the core service state lock iff the
+		 * target is non-core.
+		 */
+		target->last_reset_request = jiffies;
+		__reset_service(target, target->id != 0);
+		/* fall through */
+	case VS_SERVICE_LOCAL_RESET:
+		target->readiness = target->disable_count ?
+			VS_SERVICE_DISABLED : VS_SERVICE_RESET;
+		if (disable)
+			disable_service(target, false);
+		if (target->readiness != VS_SERVICE_DISABLED)
+			queue_ready_work(target);
+		break;
+	case VS_SERVICE_READY:
+		/* Tell userspace that the service is no longer ready. */
+		kobject_uevent(&target->dev.kobj, KOBJ_OFFLINE);
+		/* fall through */
+	case VS_SERVICE_RESET:
+		/*
+		 * This can happen for a non-core service if we get a reset
+		 * request from the server on the client side, after the
+		 * client has enabled the service but before it is active.
+		 * Note that the service is already active on the server side
+		 * at this point. The client's delay may be due to either
+		 * reset throttling or the absence of a driver.
+		 *
+		 * We bump the reset request timestamp, disable the service
+		 * again, and send back an acknowledgement.
+		 */
+		if (disable && target->id) {
+			target->last_reset_request = jiffies;
+
+			err = session_drv->service_local_reset(
+					session, target);
+			if (err < 0) {
+				dev_err(&session->dev,
+						"Failed to reset service %d; %d\n",
+						target->id, err);
+				session_fatal_error(session,
+						GFP_KERNEL);
+			}
+
+			disable_service(target, false);
+			break;
+		}
+		/* fall through */
+	case VS_SERVICE_DISABLED:
+		/*
+		 * This can happen for the core service if we get a reset
+		 * before the transport has activated, or before the core
+		 * service has become ready.
+		 *
+		 * We bump the reset request timestamp, and disable the
+		 * service again if the transport had already activated and
+		 * enabled it.
+		 */
+		if (disable && !target->id) {
+			target->last_reset_request = jiffies;
+
+			if (target->readiness != VS_SERVICE_DISABLED)
+				disable_service(target, false);
+
+			break;
+		}
+		/* fall through */
+	default:
+		dev_warn(&target->dev, "remote reset while inactive (%d)\n",
+				target->readiness);
+		err = -EPROTO;
+		break;
+	}
+
+	mutex_unlock(&target->ready_lock);
+	return err;
+}
+
+/**
+ * vs_service_handle_reset - handle an incoming notification of a reset
+ * @session: the session that owns the service
+ * @service_id: the ID of the service that is to be reset
+ * @disable: if true, the service will not be automatically re-enabled
+ *
+ * This routine is called by the core service when the remote end notifies us
+ * of a non-core service reset. The service must be in ACTIVE, LOCAL_RESET or
+ * LOCAL_DELETED state. It must be called with the core service's state lock
+ * held.
+ *
+ * If the service was in ACTIVE state, the core service is called back to send
+ * a notification to the other end. If it was in LOCAL_DELETED state, it is
+ * unregistered.
+ */
+int vs_service_handle_reset(struct vs_session_device *session,
+		vs_service_id_t service_id, bool disable)
+{
+	struct vs_service_device *target;
+	int ret;
+
+	if (!service_id)
+		return -EINVAL;
+
+	target = vs_session_get_service(session, service_id);
+	if (!target)
+		return -ENODEV;
+
+	ret = service_handle_reset(session, target, disable);
+	vs_put_service(target);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(vs_service_handle_reset);
+
+static int __enable_service(struct vs_service_device *service)
+{
+	if (WARN_ON(!service->disable_count))
+		return -EINVAL;
+
+	if (--service->disable_count > 0)
+		return 0;
+
+	/*
+	 * If the service is still resetting, it can't become ready until the
+	 * reset completes. If it has been deleted, it will never become
+	 * ready. In either case, there's nothing more to do.
+	 */
+	if ((service->readiness == VS_SERVICE_LOCAL_RESET) ||
+			(service->readiness == VS_SERVICE_LOCAL_DELETE) ||
+			(service->readiness == VS_SERVICE_DELETED))
+		return 0;
+
+	if (WARN_ON(service->readiness != VS_SERVICE_DISABLED))
+		return -EINVAL;
+
+	service->readiness = VS_SERVICE_RESET;
+	service->last_reset = jiffies;
+	queue_ready_work(service);
+
+	return 0;
+}
+
+/**
+ * vs_service_enable - allow a service to become ready
+ * @service: the service that is to be enabled
+ *
+ * Calling this routine for a service permits the session layer to make the
+ * service ready. It will do so as soon as any outstanding reset throttling
+ * is complete, and will then start the service once it has a driver attached.
+ *
+ * Services are disabled, requiring a call to this routine to re-enable them:
+ * - when first initialised (after vs_service_start),
+ * - when reset on the client side by vs_service_handle_reset,
+ * - when the transport has delayed completion of a reset, and
+ * - when the server-side core protocol is disconnected or reset by
+ *   vs_session_disable_noncore.
+ */
+int vs_service_enable(struct vs_service_device *service)
+{
+	int ret;
+
+	mutex_lock_nested(&service->ready_lock, service->lock_subclass);
+
+	ret = __enable_service(service);
+
+	mutex_unlock(&service->ready_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(vs_service_enable);
+
+/*
+ * Service work functions
+ */
+static void queue_rx_work(struct vs_service_device *service)
+{
+	bool rx_atomic;
+
+	rx_atomic = vs_service_has_atomic_rx(service);
+	vs_dev_debug(VS_DEBUG_SESSION, vs_service_get_session(service),
+			&service->dev, "Queuing rx %s\n",
+			rx_atomic ? "tasklet (atomic)" : "work (cansleep)");
+
+	if (rx_atomic)
+		tasklet_schedule(&service->rx_tasklet);
+	else
+		queue_work(service->work_queue, &service->rx_work);
+}
+
+static void cancel_pending_rx(struct vs_service_device *service)
+{
+	struct vs_mbuf *mbuf;
+
+	lockdep_assert_held(&service->ready_lock);
+
+	cancel_work_sync(&service->rx_work);
+	tasklet_kill(&service->rx_tasklet);
+
+	spin_lock_irq(&service->rx_lock);
+	while (!list_empty(&service->rx_queue)) {
+		mbuf = list_first_entry(&service->rx_queue,
+				struct vs_mbuf, queue);
+		list_del_init(&mbuf->queue);
+		spin_unlock_irq(&service->rx_lock);
+		vs_service_free_mbuf(service, mbuf);
+		spin_lock_irq(&service->rx_lock);
+	}
+	service->tx_ready = false;
+	spin_unlock_irq(&service->rx_lock);
+}
+
+static bool reset_throttle_cooled_off(struct vs_service_device *service);
+static unsigned long reset_cool_off(struct vs_service_device *service);
+
+static void service_cooloff_work(struct work_struct *work)
+{
+	struct vs_service_device *service = container_of(work,
+			struct vs_service_device, cooloff_work.work);
+	struct vs_session_device *session = vs_service_get_session(service);
+	unsigned long current_time = jiffies, wake_time;
+
+	mutex_lock_nested(&service->ready_lock, service->lock_subclass);
+
+	if (reset_throttle_cooled_off(service)) {
+		vs_debug(VS_DEBUG_SESSION, session,
+				"Reset thrashing cooled off (delay = %u ms, cool off = %u ms, last reset %u ms ago, last reset request was %u ms ago)\n",
+				jiffies_to_msecs(service->reset_delay),
+				jiffies_to_msecs(reset_cool_off(service)),
+				msecs_ago(service->last_reset),
+				msecs_ago(service->last_reset_request));
+
+		service->reset_delay = 0;
+
+		/*
+		 * If the service is already in reset, then queue_ready_work
+		 * has already run and has deferred queuing of the ready_work
+		 * until cooloff. Schedule the ready work to run immediately.
+		 */
+		if (service->readiness == VS_SERVICE_RESET)
+			schedule_delayed_work(&service->ready_work, 0);
+	} else {
+		/*
+		 * This can happen if last_reset_request has been bumped
+		 * since the cooloff work was first queued. We need to
+		 * work out how long it is until the service cools off,
+		 * then reschedule ourselves.
+		 */
+		wake_time = reset_cool_off(service) +
+				service->last_reset_request;
+
+		WARN_ON(time_after(current_time, wake_time));
+
+		schedule_delayed_work(&service->cooloff_work,
+				wake_time - current_time);
+	}
+
+	mutex_unlock(&service->ready_lock);
+}
+
+static void
+service_reset_work(struct work_struct *work)
+{
+	struct vs_service_device *service = container_of(work,
+			struct vs_service_device, reset_work);
+
+	service->pending_reset = false;
+
+	vs_service_reset(service, service);
+}
+
+/* Returns true if there are more messages to handle */
+static bool
+dequeue_and_handle_received_message(struct vs_service_device *service)
+{
+	struct vs_service_driver *driver =
+			to_vs_service_driver(service->dev.driver);
+	struct vs_session_device *session = vs_service_get_session(service);
+	const struct vs_transport_vtable *vt = session->transport->vt;
+	struct vs_service_stats *stats = &service->stats;
+	struct vs_mbuf *mbuf;
+	size_t size;
+	int ret;
+
+	/* Don't do rx work unless the service is active */
+	if (service->readiness != VS_SERVICE_ACTIVE)
+		return false;
+
+	/* Atomically take an item from the queue */
+	spin_lock_irq(&service->rx_lock);
+	if (!list_empty(&service->rx_queue)) {
+		mbuf = list_first_entry(&service->rx_queue, struct vs_mbuf,
+				queue);
+		list_del_init(&mbuf->queue);
+		spin_unlock_irq(&service->rx_lock);
+		size = vt->mbuf_size(mbuf);
+
+		/*
+		 * Call the message handler for the service. The service's
+		 * message handler is responsible for freeing the mbuf when it
+		 * is done with it.
+		 */
+		ret = driver->receive(service, mbuf);
+		if (ret < 0) {
+			atomic_inc(&service->stats.recv_failures);
+			dev_err(&service->dev,
+					"receive returned %d; resetting service\n",
+					ret);
+			vs_service_reset_nosync(service);
+			return false;
+		} else {
+			atomic_add(size, &service->stats.recv_bytes);
+			atomic_inc(&service->stats.recv_mbufs);
+		}
+
+	} else if (service->tx_ready) {
+		service->tx_ready = false;
+		spin_unlock_irq(&service->rx_lock);
+
+		/*
+		 * Update the tx_ready stats accounting and then call the
+		 * service's tx_ready handler.
+		 */
+		atomic_inc(&stats->nr_tx_ready);
+		if (atomic_read(&stats->nr_over_quota) > 0) {
+			int total;
+
+			total = atomic_add_return(jiffies_to_msecs(jiffies -
+							stats->over_quota_time),
+					&stats->over_quota_time_total);
+			atomic_set(&stats->over_quota_time_avg, total /
+					atomic_read(&stats->nr_over_quota));
+		}
+		atomic_set(&service->is_over_quota, 0);
+
+		/*
+		 * Note that a service's quota may reduce at any point, even
+		 * during the tx_ready handler. This is important if a service
+		 * has an ordered list of pending messages to send. If a
+		 * message fails to send from the tx_ready handler due to
+		 * over-quota then subsequent messages in the same handler may
+		 * send successfully. To avoid sending messages in the
+		 * incorrect order the service's tx_ready handler should
+		 * return immediately if a message fails to send.
+		 */
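+		/*
+		 * Illustrative sketch only (send_one() and the pending list
+		 * are hypothetical): a handler draining an ordered queue
+		 * would stop at the first failed send rather than skipping
+		 * over it:
+		 *
+		 *	list_for_each_entry_safe(msg, tmp, &pending, list) {
+		 *		if (send_one(service, msg) < 0)
+		 *			return 0;  // retry on next tx_ready
+		 *		list_del(&msg->list);
+		 *	}
+		 */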
+		ret = driver->tx_ready(service);
+		if (ret < 0) {
+			dev_err(&service->dev,
+					"tx_ready returned %d; resetting service\n",
+					ret);
+			vs_service_reset_nosync(service);
+			return false;
+		}
+	} else {
+		spin_unlock_irq(&service->rx_lock);
+	}
+
+	/*
+	 * There's no need to lock for this list_empty: if we race
+	 * with a msg enqueue, we'll be rescheduled by the other side,
+	 * and if we race with a dequeue, we'll just do nothing when
+	 * we run (or will be cancelled before we run).
+	 */
+	return !list_empty(&service->rx_queue) || service->tx_ready;
+}
+
+static void service_rx_tasklet(unsigned long data)
+{
+	struct vs_service_device *service = (struct vs_service_device *)data;
+	bool resched;
+
+	/*
+	 * There is no need to acquire the state spinlock or mutex here,
+	 * because this tasklet is disabled when the lock is held. These
+	 * are annotations for sparse and lockdep, respectively.
+	 *
+	 * We can't annotate the implicit mutex acquire because lockdep gets
+	 * upset about inconsistent softirq states.
+	 */
+	__acquire(service);
+	spin_acquire(&service->state_spinlock.dep_map, 0, 0, _THIS_IP_);
+
+	resched = dequeue_and_handle_received_message(service);
+
+	if (resched)
+		tasklet_schedule(&service->rx_tasklet);
+
+	spin_release(&service->state_spinlock.dep_map, 0, _THIS_IP_);
+	__release(service);
+}
+
+static void service_rx_work(struct work_struct *work)
+{
+	struct vs_service_device *service = container_of(work,
+			struct vs_service_device, rx_work);
+	bool requeue;
+
+	/*
+	 * We must acquire the state mutex here to protect services that
+	 * are using vs_service_state_lock().
+	 *
+	 * There is no need to acquire the spinlock, which is never used in
+	 * drivers with task context receive handlers.
+	 */
+	vs_service_state_lock(service);
+
+	requeue = dequeue_and_handle_received_message(service);
+
+	vs_service_state_unlock(service);
+
+	if (requeue)
+		queue_work(service->work_queue, work);
+}
+
+/*
+ * Service sysfs statistics counters. These files are all atomic_t, and
+ * read only, so we use a generator macro to avoid code duplication.
+ */
+#define service_stat_attr(__name)					\
+	static ssize_t service_stat_##__name##_show(struct device *dev, \
+			struct device_attribute *attr, char *buf)       \
+	{                                                               \
+		struct vs_service_device *service =                     \
+				to_vs_service_device(dev);              \
+									\
+		return scnprintf(buf, PAGE_SIZE, "%u\n",		\
+				atomic_read(&service->stats.__name));	\
+	}                                                               \
+	static DEVICE_ATTR(__name, S_IRUGO,                             \
+			service_stat_##__name##_show, NULL);
+
+service_stat_attr(sent_mbufs);
+service_stat_attr(sent_bytes);
+service_stat_attr(recv_mbufs);
+service_stat_attr(recv_bytes);
+service_stat_attr(nr_over_quota);
+service_stat_attr(nr_tx_ready);
+service_stat_attr(over_quota_time_total);
+service_stat_attr(over_quota_time_avg);
+
+static struct attribute *service_stat_dev_attrs[] = {
+	&dev_attr_sent_mbufs.attr,
+	&dev_attr_sent_bytes.attr,
+	&dev_attr_recv_mbufs.attr,
+	&dev_attr_recv_bytes.attr,
+	&dev_attr_nr_over_quota.attr,
+	&dev_attr_nr_tx_ready.attr,
+	&dev_attr_over_quota_time_total.attr,
+	&dev_attr_over_quota_time_avg.attr,
+	NULL,
+};
+
+static const struct attribute_group service_stat_attributes = {
+	.name   = "stats",
+	.attrs  = service_stat_dev_attrs,
+};
+
+static void delete_service(struct vs_service_device *service)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+	bool notify_on_destroy = true;
+
+	/* FIXME: Jira ticket SDK-3495 - philipd. */
+	/* This should be the caller's responsibility */
+	vs_get_service(service);
+
+	mutex_lock_nested(&service->ready_lock, service->lock_subclass);
+
+	/*
+	 * If we're on the client side, the service should already have been
+	 * disabled at this point.
+	 */
+	WARN_ON(service->id != 0 && !session->is_server &&
+			service->readiness != VS_SERVICE_DISABLED &&
+			service->readiness != VS_SERVICE_DELETED);
+
+	/*
+	 * Make sure the service is not active, and notify the remote end if
+	 * it needs to be reset. Note that we already hold the core service
+	 * state lock iff this is a non-core service.
+	 */
+	__reset_service(service, true);
+
+	/*
+	 * If the remote end is aware that the service is inactive, we can
+	 * delete right away; otherwise we need to wait for a notification
+	 * that the service has reset.
+	 */
+	switch (service->readiness) {
+	case VS_SERVICE_LOCAL_DELETE:
+	case VS_SERVICE_DELETED:
+		/* Nothing to do here */
+		mutex_unlock(&service->ready_lock);
+		vs_put_service(service);
+		return;
+	case VS_SERVICE_ACTIVE:
+		BUG();
+		break;
+	case VS_SERVICE_LOCAL_RESET:
+		service->readiness = VS_SERVICE_LOCAL_DELETE;
+		break;
+	case VS_SERVICE_INIT:
+		notify_on_destroy = false;
+		/* Fall through */
+	default:
+		service->readiness = VS_SERVICE_DELETED;
+		destroy_service(service, notify_on_destroy);
+		break;
+	}
+
+	mutex_unlock(&service->ready_lock);
+
+	/*
+	 * Remove the service symlink from the
+	 * sys/vservices/(<server>/<client>)-sessions/ directory
+	 */
+	vs_service_remove_sysfs_entries(session, service);
+
+	sysfs_remove_group(&service->dev.kobj, &service_stat_attributes);
+
+	/*
+	 * On the client-side we need to release the service id as soon as
+	 * the service is deleted. Otherwise the server may attempt to create
+	 * a new service with this id.
+	 */
+	if (!session->is_server)
+		vs_session_release_service_id(service);
+
+	device_del(&service->dev);
+	vs_put_service(service);
+}
+
+/**
+ * vs_service_delete - deactivate and start removing a service device
+ * @service: the service to delete
+ * @caller: the service initiating deletion
+ *
+ * Services may only be deleted by their owner (on the server side), or by the
+ * core service. This function must not be called for the core service.
+ */
+int vs_service_delete(struct vs_service_device *service,
+		struct vs_service_device *caller)
+{
+	struct vs_session_device *session =
+			vs_service_get_session(service);
+	struct vs_service_device *core_service = session->core_service;
+
+	if (WARN_ON(!core_service))
+		return -ENODEV;
+
+	if (!service->id)
+		return -EINVAL;
+
+	if (caller != service->owner && caller != core_service)
+		return -EPERM;
+
+	delete_service(service);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vs_service_delete);
+
+/**
+ * vs_service_handle_delete - deactivate and start removing a service device
+ * @service: the service to delete
+ *
+ * This is a variant of vs_service_delete which must only be called by the
+ * core service. It is used by the core service client when a service_removed
+ * message is received.
+ */
+int vs_service_handle_delete(struct vs_service_device *service)
+{
+	struct vs_session_device *session __maybe_unused =
+			vs_service_get_session(service);
+	struct vs_service_device *core_service __maybe_unused =
+			session->core_service;
+
+	lockdep_assert_held(&core_service->state_mutex);
+
+	delete_service(service);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vs_service_handle_delete);
+
+static void service_cleanup_work(struct work_struct *work)
+{
+	struct vs_service_device *service = container_of(work,
+			struct vs_service_device, cleanup_work);
+	struct vs_session_device *session = vs_service_get_session(service);
+
+	vs_dev_debug(VS_DEBUG_SESSION, session, &service->dev, "cleanup\n");
+
+	if (service->owner)
+		vs_put_service(service->owner);
+
+	/* Put our reference to the session */
+	if (service->dev.parent)
+		put_device(service->dev.parent);
+
+	tasklet_kill(&service->rx_tasklet);
+	cancel_work_sync(&service->rx_work);
+	cancel_delayed_work_sync(&service->cooloff_work);
+	cancel_delayed_work_sync(&service->ready_work);
+	cancel_work_sync(&service->reset_work);
+
+	if (service->work_queue)
+		destroy_workqueue(service->work_queue);
+
+	kfree(service->sysfs_name);
+	kfree(service->name);
+	kfree(service->protocol);
+	kfree(service);
+}
+
+static void vs_service_release(struct device *dev)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	vs_dev_debug(VS_DEBUG_SESSION, vs_service_get_session(service),
+			&service->dev, "release\n");
+
+	/*
+	 * We need to defer cleanup to avoid a circular dependency between the
+	 * core service's state lock (which can be held at this point, on the
+	 * client side) and any non-core service's reset work (which we must
+	 * cancel here, and which acquires the core service state lock).
+	 */
+	schedule_work(&service->cleanup_work);
+}
+
+static int service_add_idr(struct vs_session_device *session,
+		struct vs_service_device *service, vs_service_id_t service_id)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)
+	int err, base_id, id;
+
+	if (service_id == VS_SERVICE_AUTO_ALLOCATE_ID)
+		base_id = 1;
+	else
+		base_id = service_id;
+
+retry:
+	if (!idr_pre_get(&session->service_idr, GFP_KERNEL))
+		return -ENOMEM;
+
+	mutex_lock(&session->service_idr_lock);
+	err = idr_get_new_above(&session->service_idr, service, base_id, &id);
+	if (err == 0) {
+		if (service_id != VS_SERVICE_AUTO_ALLOCATE_ID &&
+				id != service_id) {
+			/* Failed to allocate the requested service id */
+			idr_remove(&session->service_idr, id);
+			mutex_unlock(&session->service_idr_lock);
+			return -EBUSY;
+		}
+		if (id > VS_MAX_SERVICE_ID) {
+			/* We are out of service ids */
+			idr_remove(&session->service_idr, id);
+			mutex_unlock(&session->service_idr_lock);
+			return -ENOSPC;
+		}
+	}
+	mutex_unlock(&session->service_idr_lock);
+	if (err == -EAGAIN)
+		goto retry;
+	if (err < 0)
+		return err;
+#else
+	int start, end, id;
+
+	if (service_id == VS_SERVICE_AUTO_ALLOCATE_ID) {
+		start = 1;
+		end = VS_MAX_SERVICES;
+	} else {
+		start = service_id;
+		end = service_id + 1;
+	}
+
+	mutex_lock(&session->service_idr_lock);
+	id = idr_alloc(&session->service_idr, service, start, end,
+			GFP_KERNEL);
+	mutex_unlock(&session->service_idr_lock);
+
+	if (id == -ENOSPC)
+		return -EBUSY;
+	else if (id < 0)
+		return id;
+#endif
+
+	service->id = id;
+	return 0;
+}
+
+static int
+vs_service_create_sysfs_entries(struct vs_session_device *session,
+		struct vs_service_device *service, vs_service_id_t id)
+{
+	int ret;
+	char *sysfs_name, *c;
+
+	/* Add a symlink to session device inside service device sysfs */
+	ret = sysfs_create_link(&service->dev.kobj, &session->dev.kobj,
+			VS_SESSION_SYMLINK_NAME);
+	if (ret) {
+		dev_err(&service->dev, "Error %d creating session symlink\n",
+				ret);
+		goto fail;
+	}
+
+	/* Build the sysfs link name for this service ("<name>:<id>") */
+	sysfs_name = kasprintf(GFP_KERNEL, "%s:%d", service->name, id);
+	if (!sysfs_name) {
+		ret = -ENOMEM;
+		goto fail_session_link;
+	}
+
+	/*
+	 * We don't want to create symlinks containing '/', which would be
+	 * interpreted as a directory separator, so replace all '/'s with '!'s.
+	 */
+	while ((c = strchr(sysfs_name, '/')))
+		*c = '!';
+	ret = sysfs_create_link(session->sysfs_entry, &service->dev.kobj,
+			sysfs_name);
+	if (ret)
+		goto fail_free_sysfs_name;
+
+	service->sysfs_name = sysfs_name;
+
+	return 0;
+
+fail_free_sysfs_name:
+	kfree(sysfs_name);
+fail_session_link:
+	sysfs_remove_link(&service->dev.kobj, VS_SESSION_SYMLINK_NAME);
+fail:
+	return ret;
+}
+
+/**
+ * vs_service_register - create and register a new vs_service_device
+ * @session: the session device that is the parent of the service
+ * @owner: the service responsible for managing the new service
+ * @service_id: the ID of the new service
+ * @name: the name of the new service
+ * @protocol: the protocol for the new service
+ * @plat_data: value to be assigned to (struct device *)->platform_data
+ *
+ * This function should only be called by a session driver that is bound to
+ * the given session.
+ *
+ * The given service_id must not have been passed to a prior successful
+ * vs_service_register call, unless the service ID has since been freed by a
+ * call to the session driver's service_removed callback.
+ *
+ * The core service state lock must not be held while calling this function.
+ */
+struct vs_service_device *vs_service_register(struct vs_session_device *session,
+		struct vs_service_device *owner, vs_service_id_t service_id,
+		const char *protocol, const char *name, const void *plat_data)
+{
+	struct vs_service_device *service;
+	struct vs_session_driver *session_drv;
+	int ret = -EIO;
+	char *c;
+
+	if (service_id && !owner) {
+		dev_err(&session->dev, "Non-core service must have an owner\n");
+		ret = -EINVAL;
+		goto fail;
+	} else if (!service_id && owner) {
+		dev_err(&session->dev, "Core service must not have an owner\n");
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	if (!session->dev.driver)
+		goto fail;
+
+	session_drv = to_vs_session_driver(session->dev.driver);
+
+	service = kzalloc(sizeof(*service), GFP_KERNEL);
+	if (!service) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	INIT_LIST_HEAD(&service->rx_queue);
+	INIT_WORK(&service->rx_work, service_rx_work);
+	INIT_WORK(&service->reset_work, service_reset_work);
+	INIT_DELAYED_WORK(&service->ready_work, service_ready_work);
+	INIT_DELAYED_WORK(&service->cooloff_work, service_cooloff_work);
+	INIT_WORK(&service->cleanup_work, service_cleanup_work);
+	spin_lock_init(&service->rx_lock);
+	init_waitqueue_head(&service->quota_wq);
+
+	service->owner = vs_get_service(owner);
+
+	service->readiness = VS_SERVICE_INIT;
+	mutex_init(&service->ready_lock);
+	service->driver_probed = false;
+
+	/*
+	 * Service state locks - A service is only allowed to use one of these
+	 */
+	spin_lock_init(&service->state_spinlock);
+	mutex_init(&service->state_mutex);
+#ifdef CONFIG_VSERVICES_LOCK_DEBUG
+	service->state_spinlock_used = false;
+	service->state_mutex_used = false;
+#endif
+
+	/* Lock ordering
+	 *
+	 * The dependency order for the various service locks is as follows:
+	 *
+	 * cooloff_work
+	 * reset_work
+	 * ready_work
+	 * ready_lock/0
+	 * rx_work/0
+	 * state_mutex/0
+	 * ready_lock/1
+	 * ...
+	 * state_mutex/n
+	 * state_spinlock
+	 *
+	 * The subclass is the service's rank in the hierarchy of
+	 * service ownership. This results in core having subclass 0 on
+	 * server-side and 1 on client-side. Services directly created
+	 * by the core will have a lock subclass value of 2 for
+	 * servers, 3 for clients. Services created by non-core
+	 * services will have a lock subclass value of x + 2, where x
+	 * is the lock subclass of the creator service (e.g. servers
+	 * will have even-numbered lock subclasses, clients will have
+	 * odd-numbered lock subclasses).
+	 *
+	 * If a service driver has any additional locks for protecting
+	 * internal state, they will generally fit between state_mutex/n and
+	 * ready_lock/n+1 on this list. For the core service, this applies to
+	 * the session lock.
+	 */
+
+	if (owner)
+		service->lock_subclass = owner->lock_subclass + 2;
+	else
+		service->lock_subclass = session->is_server ? 0 : 1;
+
+#ifdef CONFIG_LOCKDEP
+	if (service->lock_subclass >= MAX_LOCKDEP_SUBCLASSES) {
+		dev_warn(&session->dev, "Owner hierarchy is too deep, lockdep will fail\n");
+	} else {
+		/*
+		 * We need to set the default subclass for the rx work,
+		 * because the workqueue API doesn't (and can't) provide
+		 * anything like lock_nested() for it.
+		 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)
+		/*
+		 * Lockdep allows a specific lock's subclass to be set with
+		 * the subclass argument to lockdep_init_map(). However, prior
+		 * to Linux 3.3, that only works the first time it is called
+		 * for a given class and subclass. So we have to fake it,
+		 * putting every subclass in a different class, so the only
+		 * thing that breaks is printing the subclass in lockdep
+		 * warnings.
+		 */
+		static struct lock_class_key
+				rx_work_keys[MAX_LOCKDEP_SUBCLASSES];
+		struct lock_class_key *key =
+				&rx_work_keys[service->lock_subclass];
+#else
+		struct lock_class_key *key = service->rx_work.lockdep_map.key;
+#endif
+
+		/*
+		 * We can't use the lockdep_set_class() macro because the
+		 * work's lockdep map is called .lockdep_map instead of
+		 * .dep_map.
+		 */
+		lockdep_init_map(&service->rx_work.lockdep_map,
+				"&service->rx_work", key,
+				service->lock_subclass);
+	}
+#endif
+
+	/*
+	 * Copy the protocol and name. Remove any leading or trailing
+	 * whitespace characters (including newlines) since the strings
+	 * may have been passed via sysfs files.
+	 */
+	if (protocol) {
+		service->protocol = kstrdup(protocol, GFP_KERNEL);
+		if (!service->protocol) {
+			ret = -ENOMEM;
+			goto fail_copy_protocol;
+		}
+		c = strim(service->protocol);
+		if (c != service->protocol)
+			memmove(service->protocol, c,
+					strlen(service->protocol) + 1);
+	}
+
+	service->name = kstrdup(name, GFP_KERNEL);
+	if (!service->name) {
+		ret = -ENOMEM;
+		goto fail_copy_name;
+	}
+	c = strim(service->name);
+	if (c != service->name)
+		memmove(service->name, c, strlen(service->name) + 1);
+
+	service->is_server = session_drv->is_server;
+
+	/* Grab a reference to the session we are on */
+	service->dev.parent = get_device(&session->dev);
+	service->dev.bus = session_drv->service_bus;
+	service->dev.release = vs_service_release;
+
+	service->last_reset = 0;
+	service->last_reset_request = 0;
+	service->last_ready = 0;
+	service->reset_delay = 0;
+
+	device_initialize(&service->dev);
+	service->dev.platform_data = (void *)plat_data;
+
+	ret = service_add_idr(session, service, service_id);
+	if (ret)
+		goto fail_add_idr;
+
+#ifdef CONFIG_VSERVICES_NAMED_DEVICE
+	/* Integrate session and service names in vservice devnodes */
+	dev_set_name(&service->dev, "vservice-%s:%s:%s:%d:%d",
+			session->is_server ? "server" : "client",
+			session->name, service->name,
+			session->session_num, service->id);
+#else
+	dev_set_name(&service->dev, "%s:%d", dev_name(&session->dev),
+			service->id);
+#endif
+
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+	if (service->id > 0)
+		service->dev.devt = MKDEV(vservices_cdev_major,
+			(session->session_num * VS_MAX_SERVICES) +
+			service->id);
+#endif
+
+	service->work_queue = vs_create_workqueue(dev_name(&service->dev));
+	if (!service->work_queue) {
+		ret = -ENOMEM;
+		goto fail_create_workqueue;
+	}
+
+	tasklet_init(&service->rx_tasklet, service_rx_tasklet,
+			(unsigned long)service);
+
+	/*
+	 * If this is the core service, set the core service pointer in the
+	 * session.
+	 */
+	if (service->id == 0) {
+		mutex_lock(&session->service_idr_lock);
+		if (session->core_service) {
+			ret = -EEXIST;
+			mutex_unlock(&session->service_idr_lock);
+			goto fail_become_core;
+		}
+
+		/* Put in vs_session_bus_remove() */
+		session->core_service = vs_get_service(service);
+		mutex_unlock(&session->service_idr_lock);
+	}
+
+	/* Notify the transport */
+	ret = session->transport->vt->service_add(session->transport, service);
+	if (ret) {
+		dev_err(&session->dev,
+				"Failed to add service %d (%s:%s) to transport: %d\n",
+				service->id, service->name,
+				service->protocol, ret);
+		goto fail_transport_add;
+	}
+
+	/* Delay uevent until vs_service_start(). */
+	dev_set_uevent_suppress(&service->dev, true);
+
+	ret = device_add(&service->dev);
+	if (ret)
+		goto fail_device_add;
+
+	/* Create the service statistics sysfs group */
+	ret = sysfs_create_group(&service->dev.kobj, &service_stat_attributes);
+	if (ret)
+		goto fail_sysfs_create_group;
+
+	/* Create additional sysfs files */
+	ret = vs_service_create_sysfs_entries(session, service, service->id);
+	if (ret)
+		goto fail_sysfs_add_entries;
+
+	return service;
+
+fail_sysfs_add_entries:
+	sysfs_remove_group(&service->dev.kobj, &service_stat_attributes);
+fail_sysfs_create_group:
+	device_del(&service->dev);
+fail_device_add:
+	session->transport->vt->service_remove(session->transport, service);
+fail_transport_add:
+	if (service->id == 0) {
+		session->core_service = NULL;
+		vs_put_service(service);
+	}
+fail_become_core:
+fail_create_workqueue:
+	vs_session_release_service_id(service);
+fail_add_idr:
+	/*
+	 * device_initialize() has been called, so we must call put_device()
+	 * and let vs_service_release() handle the rest of the cleanup.
+	 */
+	put_device(&service->dev);
+	return ERR_PTR(ret);
+
+fail_copy_name:
+	kfree(service->protocol);
+fail_copy_protocol:
+	kfree(service);
+fail:
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(vs_service_register);
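+
+/*
+ * Illustrative use only (not part of this patch): a server-side session
+ * driver bound to a session might create and start a service roughly as
+ * follows; the protocol and name strings here are placeholders.
+ *
+ *	struct vs_service_device *svc;
+ *
+ *	svc = vs_service_register(session, owner, VS_SERVICE_AUTO_ALLOCATE_ID,
+ *			"com.example.echo", "echo0", NULL);
+ *	if (IS_ERR(svc))
+ *		return PTR_ERR(svc);
+ *	vs_service_start(svc);
+ */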
+
+/**
+ * vs_session_get_service - Look up a service by ID on a session and get
+ * a reference to it. The caller must call vs_put_service when it is finished
+ * with the service.
+ *
+ * @session: The session to search for the service on
+ * @service_id: ID of the service to find
+ */
+struct vs_service_device *
+vs_session_get_service(struct vs_session_device *session,
+		vs_service_id_t service_id)
+{
+	struct vs_service_device *service;
+
+	if (!session)
+		return NULL;
+
+	rcu_read_lock();
+	service = idr_find(&session->service_idr, service_id);
+	if (!service) {
+		rcu_read_unlock();
+		return NULL;
+	}
+	vs_get_service(service);
+	rcu_read_unlock();
+
+	return service;
+}
+EXPORT_SYMBOL_GPL(vs_session_get_service);
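+
+/*
+ * A minimal sketch of the expected get/put pairing for callers that already
+ * hold a valid service id:
+ *
+ *	struct vs_service_device *service;
+ *
+ *	service = vs_session_get_service(session, service_id);
+ *	if (!service)
+ *		return -ENOTCONN;
+ *	...
+ *	vs_put_service(service);
+ */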
+
+/**
+ * __for_each_service - Iterate over all non-core services on a session.
+ *
+ * @session: Session to iterate services on
+ * @func: Callback function for each iterated service
+ *
+ * Iterate over all services on a session, excluding the core service, and
+ * call a callback function on each.
+ */
+static void __for_each_service(struct vs_session_device *session,
+		void (*func)(struct vs_service_device *))
+{
+	struct vs_service_device *service;
+	int id;
+
+	for (id = 1; ; id++) {
+		rcu_read_lock();
+		service = idr_get_next(&session->service_idr, &id);
+		if (!service) {
+			rcu_read_unlock();
+			break;
+		}
+		vs_get_service(service);
+		rcu_read_unlock();
+
+		func(service);
+		vs_put_service(service);
+	}
+}
+
+/**
+ * vs_session_delete_noncore - immediately delete all non-core services
+ * @session: the session whose services are to be deleted
+ *
+ * This function disables and deletes all non-core services without notifying
+ * the core service. It must only be called by the core service, with its state
+ * lock held. It is used when the core service client disconnects or
+ * resets, and when the core service server has its driver removed.
+ */
+void vs_session_delete_noncore(struct vs_session_device *session)
+{
+	struct vs_service_device *core_service __maybe_unused =
+			session->core_service;
+
+	lockdep_assert_held(&core_service->state_mutex);
+
+	vs_session_disable_noncore(session);
+
+	__for_each_service(session, delete_service);
+}
+EXPORT_SYMBOL_GPL(vs_session_delete_noncore);
+
+/**
+ * vs_session_for_each_service - Iterate over all initialised and non-deleted
+ * non-core services on a session.
+ *
+ * @session: Session to iterate services on
+ * @func: Callback function for each iterated service
+ * @data: Extra data to pass to the callback
+ *
+ * Iterate over all services on a session, excluding the core service and any
+ * service that has been deleted or has not yet had vs_service_start() called,
+ * and call a callback function on each. The callback function is called with
+ * the service's ready lock held.
+ */
+void vs_session_for_each_service(struct vs_session_device *session,
+		void (*func)(struct vs_service_device *, void *), void *data)
+{
+	struct vs_service_device *service;
+	int id;
+
+	for (id = 1; ; id++) {
+		rcu_read_lock();
+		service = idr_get_next(&session->service_idr, &id);
+		if (!service) {
+			rcu_read_unlock();
+			break;
+		}
+		vs_get_service(service);
+		rcu_read_unlock();
+
+		mutex_lock_nested(&service->ready_lock, service->lock_subclass);
+
+		if (service->readiness != VS_SERVICE_LOCAL_DELETE &&
+				service->readiness != VS_SERVICE_DELETED &&
+				service->readiness != VS_SERVICE_INIT)
+			func(service, data);
+
+		mutex_unlock(&service->ready_lock);
+		vs_put_service(service);
+	}
+}
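+
+/*
+ * Example callback shape (a sketch, not used in this file). The callback
+ * runs with the service's ready_lock held, once per started, non-deleted,
+ * non-core service:
+ *
+ *	static void count_services(struct vs_service_device *service,
+ *			void *data)
+ *	{
+ *		unsigned *count = data;
+ *
+ *		(*count)++;
+ *	}
+ *
+ *	vs_session_for_each_service(session, count_services, &count);
+ */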
+
+static void force_disable_service(struct vs_service_device *service,
+		void *unused)
+{
+	lockdep_assert_held(&service->ready_lock);
+
+	if (service->readiness == VS_SERVICE_ACTIVE)
+		__reset_service(service, false);
+
+	disable_service(service, true);
+}
+
+/**
+ * vs_session_disable_noncore - immediately disable all non-core services
+ * @session: the session whose services are to be disabled
+ *
+ * This function must be called by the core service driver to disable all
+ * services, whenever it resets or is otherwise disconnected. It is called
+ * directly by the server-side core service, and by the client-side core
+ * service via vs_session_delete_noncore().
+ */
+void vs_session_disable_noncore(struct vs_session_device *session)
+{
+	vs_session_for_each_service(session, force_disable_service, NULL);
+}
+EXPORT_SYMBOL_GPL(vs_session_disable_noncore);
+
+static void try_enable_service(struct vs_service_device *service, void *unused)
+{
+	lockdep_assert_held(&service->ready_lock);
+
+	__enable_service(service);
+}
+
+/**
+ * vs_session_enable_noncore - enable all disabled non-core services
+ * @session: the session whose services are to be enabled
+ *
+ * This function is called by the core server driver to enable all services
+ * when the core client connects.
+ */
+void vs_session_enable_noncore(struct vs_session_device *session)
+{
+	vs_session_for_each_service(session, try_enable_service, NULL);
+}
+EXPORT_SYMBOL_GPL(vs_session_enable_noncore);
+
+/**
+ * vs_session_handle_message - process an incoming message from a transport
+ * @session: the session that is receiving the message
+ * @mbuf: a buffer containing the message payload
+ * @service_id: the id of the service that the message was addressed to
+ *
+ * This routine will return 0 if the buffer was accepted, or a negative value
+ * otherwise. In the latter case the caller should free the buffer. If the
+ * error is fatal, this routine will reset the service.
+ *
+ * This routine may be called from interrupt context.
+ *
+ * The caller must always serialise calls to this function relative to
+ * vs_session_handle_reset and vs_session_handle_activate. We don't do this
+ * internally, to avoid having to disable interrupts when called from task
+ * context.
+ */
+int vs_session_handle_message(struct vs_session_device *session,
+		struct vs_mbuf *mbuf, vs_service_id_t service_id)
+{
+	struct vs_service_device *service;
+	struct vs_transport *transport;
+	unsigned long flags;
+
+	transport = session->transport;
+
+	service = vs_session_get_service(session, service_id);
+	if (!service) {
+		dev_err(&session->dev, "message for unknown service %d\n",
+				service_id);
+		session_fatal_error(session, GFP_ATOMIC);
+		return -ENOTCONN;
+	}
+
+	/*
+	 * Take the rx lock before checking service readiness. This guarantees
+	 * that if __reset_service() has just made the service inactive, we
+	 * either see it and don't enqueue the message, or else enqueue the
+	 * message before cancel_pending_rx() runs (and removes it).
+	 */
+	spin_lock_irqsave(&service->rx_lock, flags);
+
+	/* If the service is not active, drop the message. */
+	if (service->readiness != VS_SERVICE_ACTIVE) {
+		spin_unlock_irqrestore(&service->rx_lock, flags);
+		vs_put_service(service);
+		return -ECONNRESET;
+	}
+
+	list_add_tail(&mbuf->queue, &service->rx_queue);
+	spin_unlock_irqrestore(&service->rx_lock, flags);
+
+	/* Schedule processing of the message by the service's drivers. */
+	queue_rx_work(service);
+	vs_put_service(service);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vs_session_handle_message);
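+
+/*
+ * Typical caller (an illustrative sketch of a transport receive path; the
+ * transport_free_mbuf() helper is hypothetical, not part of this patch):
+ *
+ *	err = vs_session_handle_message(session, mbuf, service_id);
+ *	if (err < 0)
+ *		transport_free_mbuf(transport, mbuf);
+ *
+ * i.e. on any error the transport still owns the buffer and must release
+ * it, as described above.
+ */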
+
+/**
+ * vs_session_quota_available - notify a service that it can transmit
+ * @session: the session owning the service that is ready
+ * @service_id: the id of the service that is ready
+ * @count: the number of buffers that just became ready
+ * @send_tx_ready: true if quota has just become nonzero due to a buffer being
+ *                 freed by the remote communication partner
+ *
+ * This routine is called by the transport driver when a send-direction
+ * message buffer becomes free. It wakes up any task that is waiting for
+ * send quota to become available.
+ *
+ * This routine may be called from interrupt context from the transport
+ * driver, and as such, it may not sleep.
+ *
+ * The caller must always serialise calls to this function relative to
+ * vs_session_handle_reset and vs_session_handle_activate. We don't do this
+ * internally, to avoid having to disable interrupts when called from task
+ * context.
+ *
+ * If the send_tx_ready argument is true, this function also schedules a
+ * call to the driver's tx_ready callback. Note that this never has priority
+ * over handling incoming messages; it will only be handled once the receive
+ * queue is empty. This is to increase batching of outgoing messages, and also
+ * to reduce the chance that an outgoing message will be dropped by the partner
+ * because an incoming message has already changed the state.
+ *
+ * In general, task context drivers should use the waitqueue, and softirq
+ * context drivers (with tx_atomic set) should use tx_ready.
+ */
+void vs_session_quota_available(struct vs_session_device *session,
+		vs_service_id_t service_id, unsigned count,
+		bool send_tx_ready)
+{
+	struct vs_service_device *service;
+	unsigned long flags;
+
+	service = vs_session_get_service(session, service_id);
+	if (!service) {
+		dev_err(&session->dev, "tx ready for unknown service %d\n",
+				service_id);
+		session_fatal_error(session, GFP_ATOMIC);
+		return;
+	}
+
+	wake_up_nr(&service->quota_wq, count);
+
+	if (send_tx_ready) {
+		/*
+		 * Take the rx lock before checking service readiness. This
+		 * guarantees that if __reset_service() has just made the
+		 * service inactive, we either see it and don't set the tx_ready
+		 * flag, or else set the flag before cancel_pending_rx() runs
+		 * (and clears it).
+		 */
+		spin_lock_irqsave(&service->rx_lock, flags);
+
+		/* If the service is not active, drop the tx_ready event */
+		if (service->readiness != VS_SERVICE_ACTIVE) {
+			spin_unlock_irqrestore(&service->rx_lock, flags);
+			vs_put_service(service);
+			return;
+		}
+
+		service->tx_ready = true;
+		spin_unlock_irqrestore(&service->rx_lock, flags);
+
+		/* Schedule RX processing by the service driver. */
+		queue_rx_work(service);
+	}
+
+	vs_put_service(service);
+}
+EXPORT_SYMBOL_GPL(vs_session_quota_available);
+
+/**
+ * vs_session_handle_notify - process an incoming notification from a transport
+ * @session: the session that is receiving the notification
+ * @bits: notification flags
+ * @service_id: the id of the service that the notification was addressed to
+ *
+ * This function may be called from interrupt context from the transport driver,
+ * and as such, it may not sleep.
+ */
+void vs_session_handle_notify(struct vs_session_device *session,
+		unsigned long bits, vs_service_id_t service_id)
+{
+	struct vs_service_device *service;
+	struct vs_service_driver *driver;
+	unsigned long flags;
+
+	service = vs_session_get_service(session, service_id);
+	if (!service) {
+		/* Ignore the notification since the service id doesn't exist */
+		dev_err(&session->dev, "notification for unknown service %d\n",
+				service_id);
+		return;
+	}
+
+	/*
+	 * Take the rx lock before checking service readiness. This guarantees
+	 * that if __reset_service() has just made the service inactive, we
+	 * either see it and don't send the notification, or else send it
+	 * before cancel_pending_rx() runs (and thus before the driver is
+	 * deactivated).
+	 */
+	spin_lock_irqsave(&service->rx_lock, flags);
+
+	/* If the service is not active, drop the notification. */
+	if (service->readiness != VS_SERVICE_ACTIVE) {
+		spin_unlock_irqrestore(&service->rx_lock, flags);
+		vs_put_service(service);
+		return;
+	}
+
+	/* There should be a driver bound on the service */
+	if (WARN_ON(!service->dev.driver)) {
+		spin_unlock_irqrestore(&service->rx_lock, flags);
+		vs_put_service(service);
+		return;
+	}
+
+	driver = to_vs_service_driver(service->dev.driver);
+	/* Call the driver's notify function */
+	driver->notify(service, bits);
+
+	spin_unlock_irqrestore(&service->rx_lock, flags);
+	vs_put_service(service);
+}
+EXPORT_SYMBOL_GPL(vs_session_handle_notify);
+
+static unsigned long reset_cool_off(struct vs_service_device *service)
+{
+	return service->reset_delay * RESET_THROTTLE_COOL_OFF_MULT;
+}
+
+static bool ready_needs_delay(struct vs_service_device *service)
+{
+	/*
+	 * We throttle resets if too little time elapsed between the service
+	 * last becoming ready, and the service last starting a reset.
+	 *
+	 * We do not use the current time here because it includes the time
+	 * taken by the local service driver to actually process the reset.
+	 */
+	return service->last_reset && service->last_ready && time_before(
+			service->last_reset,
+			service->last_ready + RESET_THROTTLE_TIME);
+}
+
+static bool reset_throttle_cooled_off(struct vs_service_device *service)
+{
+	/*
+	 * Reset throttling cools off if enough time has elapsed since the
+	 * last reset request.
+	 *
+	 * We check against the last requested reset, not the last serviced
+	 * reset or ready. If we are throttling, a reset may not have been
+	 * serviced for some time even though we are still receiving requests.
+	 */
+	return service->reset_delay && service->last_reset_request &&
+			time_after(jiffies, service->last_reset_request +
+					reset_cool_off(service));
+}
+
+/*
+ * Queue up the ready work for a service. If a service is resetting too fast
+ * then it will be throttled using an exponentially increasing delay before
+ * marking it ready. If the reset speed backs off then the ready throttling
+ * will be cleared. If a service reaches the maximum throttling delay then all
+ * resets will be ignored until the cool off period has elapsed.
+ *
+ * The basic logic of the reset throttling is:
+ *
+ *  - If a reset request is processed and the last ready was less than
+ *    RESET_THROTTLE_TIME ago, then the ready needs to be delayed to
+ *    throttle resets.
+ *
+ *  - The ready delay increases exponentially on each throttled reset
+ *    between RESET_THROTTLE_MIN and RESET_THROTTLE_MAX.
+ *
+ *  - If RESET_THROTTLE_MAX is reached then no ready will be sent until the
+ *    reset requests have cooled off.
+ *
+ *  - Reset requests have cooled off when no reset requests have been
+ *    received for RESET_THROTTLE_COOL_OFF_MULT * the service's current
+ *    ready delay. The service's reset throttling is disabled.
+ *
+ * Note: Be careful when adding print statements, including debugging, to
+ * this function. The ready throttling is intended to prevent DOSing of the
+ * vServices due to repeated resets (e.g. because of a persistent failure).
+ * Adding a printk on each reset, for example, would result in syslog
+ * spamming, which is a DoS attack in itself.
+ *
+ * The ready lock must be held by the caller.
+ */
+static void queue_ready_work(struct vs_service_device *service)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+	unsigned long delay;
+	bool wait_for_cooloff = false;
+
+	lockdep_assert_held(&service->ready_lock);
+
+	/* This should only be called when the service enters reset. */
+	WARN_ON(service->readiness != VS_SERVICE_RESET);
+
+	if (ready_needs_delay(service)) {
+		/* Reset delay increments exponentially */
+		if (!service->reset_delay) {
+			service->reset_delay = RESET_THROTTLE_MIN;
+		} else if (service->reset_delay < RESET_THROTTLE_MAX) {
+			service->reset_delay *= 2;
+		} else {
+			wait_for_cooloff = true;
+		}
+
+		delay = service->reset_delay;
+	} else {
+		/* The reset request appears to have been sane. */
+		delay = 0;
+	}
+
+	if (service->reset_delay > 0) {
+		/*
+		 * Schedule cooloff work, to set the reset_delay to 0 if
+		 * the reset requests stop for long enough.
+		 */
+		schedule_delayed_work(&service->cooloff_work,
+				reset_cool_off(service));
+	}
+
+	if (wait_for_cooloff) {
+		/*
+		 * We need to finish cooling off before we service resets
+		 * again. Schedule cooloff_work to run after the current
+		 * cooloff period ends; it may reschedule itself even later
+		 * if any more requests arrive.
+		 */
+		dev_err(&session->dev,
+				"Service %s is resetting too fast - must cool off for %u ms\n",
+				dev_name(&service->dev),
+				jiffies_to_msecs(reset_cool_off(service)));
+		return;
+	}
+
+	if (delay)
+		dev_err(&session->dev,
+				"Service %s is resetting too fast - delaying ready by %u ms\n",
+				dev_name(&service->dev),
+				jiffies_to_msecs(delay));
+
+	vs_debug(VS_DEBUG_SESSION, session,
+			"Service %s will become ready in %u ms\n",
+			dev_name(&service->dev),
+			jiffies_to_msecs(delay));
+
+	if (service->last_ready)
+		vs_debug(VS_DEBUG_SESSION, session,
+				"Last became ready %u ms ago\n",
+				msecs_ago(service->last_ready));
+	if (service->reset_delay >= RESET_THROTTLE_MAX)
+		dev_err(&session->dev, "Service %s hit max reset throttle\n",
+				dev_name(&service->dev));
+
+	schedule_delayed_work(&service->ready_work, delay);
+}
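+
+/*
+ * Worked example (illustrative values only; the real limits are set by
+ * RESET_THROTTLE_MIN/MAX): if the minimum delay were 1 second and the
+ * maximum 16 seconds, a service that keeps resetting immediately after
+ * becoming ready would see ready delays of 1, 2, 4, 8 and then 16 seconds.
+ * Once pinned at the maximum, no further ready is scheduled until
+ * RESET_THROTTLE_COOL_OFF_MULT * 16 seconds pass without any new reset
+ * request, at which point cooloff_work clears reset_delay back to 0.
+ */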
+
+static void session_activation_work(struct work_struct *work)
+{
+	struct vs_session_device *session = container_of(work,
+			struct vs_session_device, activation_work);
+	struct vs_service_device *core_service = session->core_service;
+	struct vs_session_driver *session_drv =
+			to_vs_session_driver(session->dev.driver);
+	int activation_state;
+	int ret;
+
+	if (WARN_ON(!core_service))
+		return;
+
+	if (WARN_ON(!session_drv))
+		return;
+
+	/*
+	 * We use an atomic to prevent duplicate activations if we race with
+	 * an activate after a reset. This is very unlikely, but possible if
+	 * this work item is preempted.
+	 */
+	activation_state = atomic_cmpxchg(&session->activation_state,
+			VS_SESSION_ACTIVATE, VS_SESSION_ACTIVE);
+
+	switch (activation_state) {
+	case VS_SESSION_ACTIVATE:
+		vs_debug(VS_DEBUG_SESSION, session,
+				"core service will be activated\n");
+		vs_service_enable(core_service);
+		break;
+
+	case VS_SESSION_RESET:
+		vs_debug(VS_DEBUG_SESSION, session,
+				"core service will be deactivated\n");
+
+		/* Handle the core service reset */
+		ret = service_handle_reset(session, core_service, true);
+
+		/* Tell the transport if the reset succeeded */
+		if (ret >= 0)
+			session->transport->vt->ready(session->transport);
+		else
+			dev_err(&session->dev, "core service reset unhandled: %d\n",
+					ret);
+
+		break;
+
+	default:
+		vs_debug(VS_DEBUG_SESSION, session,
+				"core service already active\n");
+		break;
+	}
+}
+
+/**
+ * vs_session_handle_reset - Handle a reset at the session layer.
+ * @session: Session to reset
+ *
+ * This function is called by the transport when it receives a transport-level
+ * reset notification.
+ *
+ * After a session is reset by calling this function, it will reset all of its
+ * attached services, and then call the transport's ready callback. The
+ * services will remain in reset until the session is re-activated by a call
+ * to vs_session_handle_activate().
+ *
+ * Calling this function on a session that is already reset is permitted, as
+ * long as the transport accepts the consequent duplicate ready callbacks.
+ *
+ * A newly created session is initially in the reset state, and will not call
+ * the transport's ready callback. The transport may choose to either act as
+ * if the ready callback had been called, or call this function again to
+ * trigger a new ready callback.
+ */
+void vs_session_handle_reset(struct vs_session_device *session)
+{
+	atomic_set(&session->activation_state, VS_SESSION_RESET);
+
+	schedule_work(&session->activation_work);
+}
+EXPORT_SYMBOL_GPL(vs_session_handle_reset);
+
+/**
+ * vs_session_handle_activate - Allow a session to leave the reset state.
+ * @session: Session to mark active.
+ *
+ * This function is called by the transport when a transport-level reset is
+ * completed; that is, after the session layer has reset its services and
+ * called the ready callback, at *both* ends of the connection.
+ */
+void vs_session_handle_activate(struct vs_session_device *session)
+{
+	atomic_set(&session->activation_state, VS_SESSION_ACTIVATE);
+
+	schedule_work(&session->activation_work);
+}
+EXPORT_SYMBOL_GPL(vs_session_handle_activate);
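+
+/*
+ * Expected transport-side sequence (a sketch; the trigger events depend on
+ * the specific transport):
+ *
+ *	// transport-level reset observed
+ *	vs_session_handle_reset(session);
+ *	// ... session resets its services and calls vt->ready() ...
+ *
+ *	// both ends have signalled ready
+ *	vs_session_handle_activate(session);
+ */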
+
+static ssize_t id_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct vs_session_device *session = to_vs_session_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", session->session_num);
+}
+
+/*
+ * The vServices session device type
+ */
+static ssize_t is_server_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct vs_session_device *session = to_vs_session_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", session->is_server);
+}
+
+static ssize_t name_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct vs_session_device *session = to_vs_session_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n", session->name);
+}
+
+#ifdef CONFIG_VSERVICES_DEBUG
+static ssize_t debug_mask_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct vs_session_device *session = to_vs_session_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%.8lx\n", session->debug_mask);
+}
+
+static ssize_t debug_mask_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct vs_session_device *session = to_vs_session_device(dev);
+	int err;
+
+	err = kstrtoul(buf, 0, &session->debug_mask);
+	if (err)
+		return err;
+
+	/* Clear any bits we don't know about */
+	session->debug_mask &= VS_DEBUG_ALL;
+
+	return count;
+}
+#endif /* CONFIG_VSERVICES_DEBUG */
+
+static struct device_attribute vservices_session_dev_attrs[] = {
+	__ATTR_RO(id),
+	__ATTR_RO(is_server),
+	__ATTR_RO(name),
+#ifdef CONFIG_VSERVICES_DEBUG
+	__ATTR(debug_mask, S_IRUGO | S_IWUSR,
+			debug_mask_show, debug_mask_store),
+#endif
+	__ATTR_NULL,
+};
+
+static int vs_session_free_idr(struct vs_session_device *session)
+{
+	mutex_lock(&vs_session_lock);
+	idr_remove(&session_idr, session->session_num);
+	mutex_unlock(&vs_session_lock);
+	return 0;
+}
+
+static void vs_session_device_release(struct device *dev)
+{
+	struct vs_session_device *session = to_vs_session_device(dev);
+
+	vs_session_free_idr(session);
+
+	kfree(session->name);
+	kfree(session);
+}
+
+/*
+ * The vServices session bus
+ */
+static int vs_session_bus_match(struct device *dev,
+		struct device_driver *driver)
+{
+	struct vs_session_device *session = to_vs_session_device(dev);
+	struct vs_session_driver *session_drv = to_vs_session_driver(driver);
+
+	return (session->is_server == session_drv->is_server);
+}
+
+static int vs_session_bus_remove(struct device *dev)
+{
+	struct vs_session_device *session = to_vs_session_device(dev);
+	struct vs_service_device *core_service = session->core_service;
+
+	if (!core_service)
+		return 0;
+
+	/*
+	 * Abort any pending session activation. We rely on the transport to
+	 * not call vs_session_handle_activate after this point.
+	 */
+	cancel_work_sync(&session->activation_work);
+
+	/* Abort any pending fatal error handling, which is redundant now. */
+	cancel_work_sync(&session->fatal_error_work);
+
+	/*
+	 * Delete the core service. This will implicitly delete everything
+	 * else (in reset on the client side, and in release on the server
+	 * side). The session holds a reference, so this won't release the
+	 * service struct.
+	 */
+	delete_service(core_service);
+
+	/* Now clean up the core service. */
+	session->core_service = NULL;
+
+	/* Matches the get in vs_service_register() */
+	vs_put_service(core_service);
+
+	return 0;
+}
+
+static int vservices_session_uevent(struct device *dev,
+		struct kobj_uevent_env *env)
+{
+	struct vs_session_device *session = to_vs_session_device(dev);
+
+	dev_dbg(dev, "uevent\n");
+
+	if (add_uevent_var(env, "IS_SERVER=%d", session->is_server))
+		return -ENOMEM;
+
+	if (add_uevent_var(env, "SESSION_ID=%d", session->session_num))
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void vservices_session_shutdown(struct device *dev)
+{
+	struct vs_session_device *session = to_vs_session_device(dev);
+
+	dev_dbg(dev, "shutdown\n");
+
+	/* Do a transport reset */
+	session->transport->vt->reset(session->transport);
+}
+
+struct bus_type vs_session_bus_type = {
+	.name		= "vservices-session",
+	.match		= vs_session_bus_match,
+	.remove		= vs_session_bus_remove,
+	.dev_attrs	= vservices_session_dev_attrs,
+	.uevent		= vservices_session_uevent,
+	.shutdown	= vservices_session_shutdown,
+};
+EXPORT_SYMBOL_GPL(vs_session_bus_type);
+
+/*
+ * Common code for the vServices client and server buses
+ */
+int vs_service_bus_probe(struct device *dev)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+	struct vs_service_driver *vsdrv = to_vs_service_driver(dev->driver);
+	struct vs_session_device *session = vs_service_get_session(service);
+	int ret;
+
+	vs_dev_debug(VS_DEBUG_SESSION, session, &service->dev, "probe\n");
+
+	/*
+	 * Increase the reference count on the service driver. We don't allow
+	 * service driver modules to be removed if there are any device
+	 * instances present. The devices must be explicitly removed first.
+	 */
+	if (!try_module_get(vsdrv->driver.owner))
+		return -ENODEV;
+
+	ret = vsdrv->probe(service);
+	if (ret) {
+		module_put(vsdrv->driver.owner);
+		return ret;
+	}
+
+	service->driver_probed = true;
+
+	try_start_service(service);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vs_service_bus_probe);
+
+int vs_service_bus_remove(struct device *dev)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+	struct vs_service_driver *vsdrv = to_vs_service_driver(dev->driver);
+	int err = 0;
+
+	reset_service(service);
+
+	/* Prevent reactivation of the driver */
+	service->driver_probed = false;
+
+	/* The driver has now had its reset() callback called; remove it */
+	vsdrv->remove(service);
+
+	/*
+	 * Take the service's state mutex and spinlock. This ensures that any
+	 * thread that is calling vs_state_lock_safe[_bh] will either complete
+	 * now, or see the driver removal and fail, irrespective of which type
+	 * of lock it is using.
+	 */
+	mutex_lock_nested(&service->state_mutex, service->lock_subclass);
+	spin_lock_bh(&service->state_spinlock);
+
+	/* Release all the locks. */
+	spin_unlock_bh(&service->state_spinlock);
+	mutex_unlock(&service->state_mutex);
+	mutex_unlock(&service->ready_lock);
+
+#ifdef CONFIG_VSERVICES_LOCK_DEBUG
+	service->state_spinlock_used = false;
+	service->state_mutex_used = false;
+#endif
+
+	module_put(vsdrv->driver.owner);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(vs_service_bus_remove);
+
+int vs_service_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+	struct vs_session_device *session = vs_service_get_session(service);
+
+	dev_dbg(dev, "uevent\n");
+
+	if (add_uevent_var(env, "IS_SERVER=%d", service->is_server))
+		return -ENOMEM;
+
+	if (add_uevent_var(env, "SERVICE_ID=%d", service->id))
+		return -ENOMEM;
+
+	if (add_uevent_var(env, "SESSION_ID=%d", session->session_num))
+		return -ENOMEM;
+
+	if (add_uevent_var(env, "SERVICE_NAME=%s", service->name))
+		return -ENOMEM;
+
+	if (add_uevent_var(env, "PROTOCOL=%s", service->protocol ?: ""))
+		return -ENOMEM;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vs_service_bus_uevent);
+
+static int vs_session_create_sysfs_entry(struct vs_transport *transport,
+		struct vs_session_device *session, bool server,
+		const char *transport_name)
+{
+	char *sysfs_name;
+	struct kobject *sysfs_parent = vservices_client_root;
+
+	if (!transport_name)
+		return -EINVAL;
+
+	sysfs_name = kasprintf(GFP_KERNEL, "%s:%s", transport->type,
+			transport_name);
+	if (!sysfs_name)
+		return -ENOMEM;
+
+	if (server)
+		sysfs_parent = vservices_server_root;
+
+	session->sysfs_entry = kobject_create_and_add(sysfs_name, sysfs_parent);
+
+	kfree(sysfs_name);
+	if (!session->sysfs_entry)
+		return -ENOMEM;
+	return 0;
+}
+
+static int vs_session_alloc_idr(struct vs_session_device *session)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)
+	int err, id;
+
+retry:
+	if (!idr_pre_get(&session_idr, GFP_KERNEL))
+		return -ENOMEM;
+
+	mutex_lock(&vs_session_lock);
+	err = idr_get_new_above(&session_idr, session, 0, &id);
+	if (err == 0) {
+		if (id >= VS_MAX_SESSIONS) {
+			/* We are out of session ids */
+			idr_remove(&session_idr, id);
+			mutex_unlock(&vs_session_lock);
+			return -EBUSY;
+		}
+	}
+	mutex_unlock(&vs_session_lock);
+	if (err == -EAGAIN)
+		goto retry;
+	if (err < 0)
+		return err;
+#else
+	int id;
+
+	mutex_lock(&vs_session_lock);
+	id = idr_alloc(&session_idr, session, 0, VS_MAX_SESSIONS, GFP_KERNEL);
+	mutex_unlock(&vs_session_lock);
+
+	if (id == -ENOSPC)
+		return -EBUSY;
+	else if (id < 0)
+		return id;
+#endif
+
+	session->session_num = id;
+	return 0;
+}
+
+/**
+ * vs_session_register - register a vservices session on a transport
+ * @transport: vservices transport that the session will attach to
+ * @parent: device that implements the transport (for sysfs)
+ * @server: true if the session is server-side
+ * @transport_name: name of the transport
+ *
+ * This function is intended to be called from the probe() function of a
+ * transport driver. It sets up a new session device, which then either
+ * performs automatic service discovery (for clients) or creates sysfs nodes
+ * that allow the user to create services (for servers).
+ *
+ * Note that the parent is only used by the driver framework; it is not
+ * directly accessed by the session drivers. Thus, a single transport device
+ * can support multiple sessions, as long as they each have a unique struct
+ * vs_transport.
+ *
+ * Note: This function may sleep, and therefore must not be called from
+ * interrupt context.
+ *
+ * Returns a pointer to the new device, or an error pointer.
+ */
+struct vs_session_device *vs_session_register(struct vs_transport *transport,
+		struct device *parent, bool server, const char *transport_name)
+{
+	struct device *dev;
+	struct vs_session_device *session;
+	int ret = -ENOMEM;
+
+	WARN_ON(!transport);
+
+	session = kzalloc(sizeof(*session), GFP_KERNEL);
+	if (!session)
+		goto fail_session_alloc;
+
+	session->transport = transport;
+	session->is_server = server;
+	session->name = kstrdup(transport_name, GFP_KERNEL);
+	if (!session->name)
+		goto fail_free_session;
+
+	INIT_WORK(&session->activation_work, session_activation_work);
+	INIT_WORK(&session->fatal_error_work, session_fatal_error_work);
+
+#ifdef CONFIG_VSERVICES_DEBUG
+	session->debug_mask = default_debug_mask & VS_DEBUG_ALL;
+#endif
+
+	idr_init(&session->service_idr);
+	mutex_init(&session->service_idr_lock);
+
+	/*
+	 * We must create the session sysfs entry before registering the
+	 * device, so that the sysfs entry is available while the core
+	 * service is being registered.
+	 */
+	ret = vs_session_create_sysfs_entry(transport, session, server,
+			transport_name);
+	if (ret)
+		goto fail_free_session;
+
+	ret = vs_session_alloc_idr(session);
+	if (ret)
+		goto fail_sysfs_entry;
+
+	dev = &session->dev;
+	dev->parent = parent;
+	dev->bus = &vs_session_bus_type;
+	dev->release = vs_session_device_release;
+	dev_set_name(dev, "vservice:%d", session->session_num);
+
+	ret = device_register(dev);
+	if (ret)
+		goto fail_session_map;
+
+	/* Add a symlink to transport device inside session device sysfs dir */
+	if (parent) {
+		ret = sysfs_create_link(&session->dev.kobj,
+				&parent->kobj, VS_TRANSPORT_SYMLINK_NAME);
+		if (ret) {
+			dev_err(&session->dev,
+					"Error %d creating transport symlink\n",
+					ret);
+			goto fail_session_device_unregister;
+		}
+	}
+
+	return session;
+
+fail_session_device_unregister:
+	device_unregister(&session->dev);
+	kobject_put(session->sysfs_entry);
+	/* Remaining cleanup will be done in vs_session_device_release */
+	return ERR_PTR(ret);
+fail_session_map:
+	vs_session_free_idr(session);
+fail_sysfs_entry:
+	kobject_put(session->sysfs_entry);
+fail_free_session:
+	kfree(session->name);
+	kfree(session);
+fail_session_alloc:
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(vs_session_register);
+
+void vs_session_start(struct vs_session_device *session)
+{
+	struct vs_service_device *core_service = session->core_service;
+
+	if (WARN_ON(!core_service))
+		return;
+
+	blocking_notifier_call_chain(&vs_session_notifier_list,
+			VS_SESSION_NOTIFY_ADD, session);
+
+	vs_service_start(core_service);
+}
+EXPORT_SYMBOL_GPL(vs_session_start);
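+
+/*
+ * Illustrative transport probe sequence (a sketch under assumed names; the
+ * parent device pointer and transport name are placeholders):
+ *
+ *	session = vs_session_register(transport, &pdev->dev, is_server,
+ *			"my-transport");
+ *	if (IS_ERR(session))
+ *		return PTR_ERR(session);
+ *	...
+ *	// later, once the core service has been registered on the session
+ *	vs_session_start(session);
+ */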
+
+/**
+ * vs_session_unregister - unregister a session device
+ * @session: the session device to unregister
+ */
+void vs_session_unregister(struct vs_session_device *session)
+{
+	if (session->dev.parent)
+		sysfs_remove_link(&session->dev.kobj, VS_TRANSPORT_SYMLINK_NAME);
+	blocking_notifier_call_chain(&vs_session_notifier_list,
+			VS_SESSION_NOTIFY_REMOVE, session);
+
+	device_unregister(&session->dev);
+
+	kobject_put(session->sysfs_entry);
+}
+EXPORT_SYMBOL_GPL(vs_session_unregister);
+
+struct service_unbind_work_struct {
+	struct vs_service_device *service;
+	struct work_struct work;
+};
+
+static void service_unbind_work(struct work_struct *work)
+{
+	struct service_unbind_work_struct *unbind_work = container_of(work,
+			struct service_unbind_work_struct, work);
+
+	device_release_driver(&unbind_work->service->dev);
+
+	/* Matches vs_get_service() in vs_session_unbind_driver() */
+	vs_put_service(unbind_work->service);
+	kfree(unbind_work);
+}
+
+int vs_session_unbind_driver(struct vs_service_device *service)
+{
+	struct service_unbind_work_struct *unbind_work =
+			kmalloc(sizeof(*unbind_work), GFP_KERNEL);
+
+	if (!unbind_work)
+		return -ENOMEM;
+
+	INIT_WORK(&unbind_work->work, service_unbind_work);
+
+	/* Put in service_unbind_work() */
+	unbind_work->service = vs_get_service(service);
+	schedule_work(&unbind_work->work);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vs_session_unbind_driver);
+
+static int __init vservices_init(void)
+{
+	int r;
+
+	printk(KERN_INFO "vServices Framework 1.0\n");
+
+	vservices_root = kobject_create_and_add("vservices", NULL);
+	if (!vservices_root) {
+		r = -ENOMEM;
+		goto fail_create_root;
+	}
+
+	r = bus_register(&vs_session_bus_type);
+	if (r < 0)
+		goto fail_bus_register;
+
+	r = vs_devio_init();
+	if (r < 0)
+		goto fail_devio_init;
+
+	return 0;
+
+fail_devio_init:
+	bus_unregister(&vs_session_bus_type);
+fail_bus_register:
+	kobject_put(vservices_root);
+fail_create_root:
+	return r;
+}
+
+static void __exit vservices_exit(void)
+{
+	printk(KERN_INFO "vServices Framework exit\n");
+
+	vs_devio_exit();
+	bus_unregister(&vs_session_bus_type);
+	kobject_put(vservices_root);
+}
+
+subsys_initcall(vservices_init);
+module_exit(vservices_exit);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Session");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/session.h b/drivers/vservices/session.h
new file mode 100644
index 0000000..f51d535
--- /dev/null
+++ b/drivers/vservices/session.h
@@ -0,0 +1,173 @@
+/*
+ * drivers/vservices/session.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Definitions related to the vservices session bus and its client and server
+ * session drivers. The interfaces in this file are implementation details of
+ * the vServices framework and should not be used by transport or service
+ * drivers.
+ */
+
+#ifndef _VSERVICES_SESSION_PRIV_H_
+#define _VSERVICES_SESSION_PRIV_H_
+
+/* Maximum number of sessions allowed */
+#define VS_MAX_SESSIONS 64
+
+#include "debug.h"
+
+/* For use by the core server */
+#define VS_SERVICE_AUTO_ALLOCATE_ID	0xffff
+#define VS_SERVICE_ALREADY_RESET	1
+
+/*
+ * The upper bits of the service id are reserved for transport driver specific
+ * use. The reserve bits are always zeroed out above the transport layer.
+ */
+#define VS_SERVICE_ID_TRANSPORT_BITS	4
+#define VS_SERVICE_ID_TRANSPORT_OFFSET	12
+#define VS_SERVICE_ID_TRANSPORT_MASK ((1 << VS_SERVICE_ID_TRANSPORT_BITS) - 1)
+#define VS_SERVICE_ID_MASK \
+	(~(VS_SERVICE_ID_TRANSPORT_MASK << VS_SERVICE_ID_TRANSPORT_OFFSET))
+
+/* Number of bits needed to represent the service id range as a bitmap. */
+#define VS_SERVICE_ID_BITMAP_BITS \
+	(1 << ((sizeof(vs_service_id_t) * 8) - VS_SERVICE_ID_TRANSPORT_BITS))
+
+/* High service ids are reserved for use by the transport drivers */
+#define VS_SERVICE_ID_RESERVED(x) \
+	((1 << VS_SERVICE_ID_TRANSPORT_OFFSET) - (x))
+
+#define VS_SERVICE_ID_RESERVED_1	VS_SERVICE_ID_RESERVED(1)
+
+/* Name of the session device symlink in service device sysfs directory */
+#define VS_SESSION_SYMLINK_NAME		"session"
+
+/* Name of the transport device symlink in session device sysfs directory */
+#define VS_TRANSPORT_SYMLINK_NAME	"transport"
+
+static inline unsigned int
+vs_get_service_id_reserved_bits(vs_service_id_t service_id)
+{
+	return (service_id >> VS_SERVICE_ID_TRANSPORT_OFFSET) &
+			VS_SERVICE_ID_TRANSPORT_MASK;
+}
+
+static inline vs_service_id_t vs_get_real_service_id(vs_service_id_t service_id)
+{
+	return service_id & VS_SERVICE_ID_MASK;
+}
+
+static inline void vs_set_service_id_reserved_bits(vs_service_id_t *service_id,
+		unsigned int reserved_bits)
+{
+	*service_id &= ~(VS_SERVICE_ID_TRANSPORT_MASK <<
+			VS_SERVICE_ID_TRANSPORT_OFFSET);
+	*service_id |= (reserved_bits & VS_SERVICE_ID_TRANSPORT_MASK) <<
+			VS_SERVICE_ID_TRANSPORT_OFFSET;
+}
+
+extern struct bus_type vs_session_bus_type;
+extern struct kobject *vservices_root;
+extern struct kobject *vservices_server_root;
+extern struct kobject *vservices_client_root;
+
+/**
+ * struct vs_session_driver - Session driver
+ * @driver: Linux device model driver structure
+ * @service_bus: Pointer to either the server or client bus type
+ * @is_server: True if this driver is for a server session, false if it is for
+ * a client session
+ * @service_added: Called when a non-core service is added.
+ * @service_start: Called when a non-core service is started.
+ * @service_local_reset: Called when an active non-core service driver becomes
+ * inactive.
+ * @service_removed: Called when a non-core service is removed.
+ */
+struct vs_session_driver {
+	struct device_driver driver;
+	struct bus_type *service_bus;
+	bool is_server;
+
+	/* These are all called with the core service state lock held. */
+	int (*service_added)(struct vs_session_device *session,
+			struct vs_service_device *service);
+	int (*service_start)(struct vs_session_device *session,
+			struct vs_service_device *service);
+	int (*service_local_reset)(struct vs_session_device *session,
+			struct vs_service_device *service);
+	int (*service_removed)(struct vs_session_device *session,
+			struct vs_service_device *service);
+};
+
+#define to_vs_session_driver(drv) \
+	container_of(drv, struct vs_session_driver, driver)
+
+/* Service lookup */
+extern struct vs_service_device *vs_session_get_service(
+		struct vs_session_device *session,
+		vs_service_id_t service_id);
+
+/* Service creation & destruction */
+extern struct vs_service_device *
+vs_service_register(struct vs_session_device *session,
+		struct vs_service_device *owner,
+		vs_service_id_t service_id,
+		const char *protocol,
+		const char *name,
+		const void *plat_data);
+
+extern bool vs_service_start(struct vs_service_device *service);
+
+extern int vs_service_delete(struct vs_service_device *service,
+		struct vs_service_device *caller);
+
+extern int vs_service_handle_delete(struct vs_service_device *service);
+
+/* Service reset handling */
+extern int vs_service_handle_reset(struct vs_session_device *session,
+		vs_service_id_t service_id, bool disable);
+extern int vs_service_enable(struct vs_service_device *service);
+
+extern void vs_session_enable_noncore(struct vs_session_device *session);
+extern void vs_session_disable_noncore(struct vs_session_device *session);
+extern void vs_session_delete_noncore(struct vs_session_device *session);
+
+/* Service bus driver management */
+extern int vs_service_bus_probe(struct device *dev);
+extern int vs_service_bus_remove(struct device *dev);
+extern int vs_service_bus_uevent(struct device *dev,
+		struct kobj_uevent_env *env);
+
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+
+extern int vs_devio_init(void);
+extern void vs_devio_exit(void);
+
+extern struct vs_service_device *vs_service_lookup_by_devt(dev_t dev);
+
+extern struct vs_service_driver vs_devio_server_driver;
+extern struct vs_service_driver vs_devio_client_driver;
+
+extern int vservices_cdev_major;
+
+#else /* !CONFIG_VSERVICES_CHAR_DEV */
+
+static inline int vs_devio_init(void)
+{
+	return 0;
+}
+
+static inline void vs_devio_exit(void)
+{
+}
+
+#endif /* !CONFIG_VSERVICES_CHAR_DEV */
+
+#endif /* _VSERVICES_SESSION_PRIV_H_ */
diff --git a/drivers/vservices/skeleton_driver.c b/drivers/vservices/skeleton_driver.c
new file mode 100644
index 0000000..cfbc5df
--- /dev/null
+++ b/drivers/vservices/skeleton_driver.c
@@ -0,0 +1,133 @@
+/*
+ * drivers/vservices/skeleton_driver.c
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Skeleton testing driver for templating vService client/server drivers
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include <vservices/session.h>
+#include <vservices/buffer.h>
+#include <vservices/service.h>
+
+struct skeleton_info {
+	unsigned dummy;
+};
+
+static void vs_skeleton_handle_start(struct vs_service_device *service)
+{
+	/* NOTE: Do not change this message - it is used for system testing */
+	dev_info(&service->dev, "skeleton handle_start\n");
+}
+
+static int vs_skeleton_handle_message(struct vs_service_device *service,
+					  struct vs_mbuf *mbuf)
+{
+	dev_info(&service->dev, "skeleton handle_messasge\n");
+	return -EBADMSG;
+}
+
+static void vs_skeleton_handle_notify(struct vs_service_device *service,
+					  u32 flags)
+{
+	dev_info(&service->dev, "skeleton handle_notify\n");
+}
+
+static void vs_skeleton_handle_reset(struct vs_service_device *service)
+{
+	dev_info(&service->dev, "skeleton handle_reset %s service %d\n",
+			service->is_server ? "server" : "client", service->id);
+}
+
+static int vs_skeleton_probe(struct vs_service_device *service)
+{
+	struct skeleton_info *info;
+	int err = -ENOMEM;
+
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		goto fail;
+
+	dev_set_drvdata(&service->dev, info);
+	return 0;
+
+fail:
+	return err;
+}
+
+static int vs_skeleton_remove(struct vs_service_device *service)
+{
+	struct skeleton_info *info = dev_get_drvdata(&service->dev);
+
+	dev_info(&service->dev, "skeleton remove\n");
+	kfree(info);
+	return 0;
+}
+
+static struct vs_service_driver server_skeleton_driver = {
+	.protocol	= "com.ok-labs.skeleton",
+	.is_server	= true,
+	.probe		= vs_skeleton_probe,
+	.remove		= vs_skeleton_remove,
+	.start		= vs_skeleton_handle_start,
+	.receive	= vs_skeleton_handle_message,
+	.notify		= vs_skeleton_handle_notify,
+	.reset		= vs_skeleton_handle_reset,
+	.driver		= {
+		.name		= "vs-server-skeleton",
+		.owner		= THIS_MODULE,
+		.bus		= &vs_server_bus_type,
+	},
+};
+
+static struct vs_service_driver client_skeleton_driver = {
+	.protocol	= "com.ok-labs.skeleton",
+	.is_server	= false,
+	.probe		= vs_skeleton_probe,
+	.remove		= vs_skeleton_remove,
+	.start		= vs_skeleton_handle_start,
+	.receive	= vs_skeleton_handle_message,
+	.notify		= vs_skeleton_handle_notify,
+	.reset		= vs_skeleton_handle_reset,
+	.driver		= {
+		.name		= "vs-client-skeleton",
+		.owner		= THIS_MODULE,
+		.bus		= &vs_client_bus_type,
+	},
+};
+
+static int __init vs_skeleton_init(void)
+{
+	int ret;
+
+	ret = driver_register(&server_skeleton_driver.driver);
+	if (ret)
+		return ret;
+
+	ret = driver_register(&client_skeleton_driver.driver);
+	if (ret)
+		driver_unregister(&server_skeleton_driver.driver);
+
+	return ret;
+}
+
+static void __exit vs_skeleton_exit(void)
+{
+	driver_unregister(&server_skeleton_driver.driver);
+	driver_unregister(&client_skeleton_driver.driver);
+}
+
+module_init(vs_skeleton_init);
+module_exit(vs_skeleton_exit);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Skeleton Client/Server Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/transport.h b/drivers/vservices/transport.h
new file mode 100644
index 0000000..8e5055c
--- /dev/null
+++ b/drivers/vservices/transport.h
@@ -0,0 +1,40 @@
+/*
+ * include/vservices/transport.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file defines the private interface that vServices transport drivers
+ * must provide to the vservices session and protocol layers. The transport,
+ * transport vtable, and message buffer structures are defined in the public
+ * <vservices/transport.h> header.
+ */
+
+#ifndef _VSERVICES_TRANSPORT_PRIV_H_
+#define _VSERVICES_TRANSPORT_PRIV_H_
+
+#include <linux/types.h>
+#include <linux/list.h>
+
+#include <vservices/transport.h>
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+
+/**
+ * struct vs_notify_info - Notification information stored in the transport
+ * @service_id: Service id for this notification info
+ * @offset: Offset into the notification mapping
+ */
+struct vs_notify_info {
+	vs_service_id_t service_id;
+	unsigned offset;
+};
+
+#define VS_MAX_SERVICES		128
+#define VS_MAX_SERVICE_ID	(VS_MAX_SERVICES - 1)
+
+#endif /* _VSERVICES_TRANSPORT_PRIV_H_ */
diff --git a/drivers/vservices/transport/Kconfig b/drivers/vservices/transport/Kconfig
new file mode 100644
index 0000000..37e84c4
--- /dev/null
+++ b/drivers/vservices/transport/Kconfig
@@ -0,0 +1,20 @@
+#
+# vServices Transport driver configuration
+#
+
+menu "Transport drivers"
+
+config VSERVICES_OKL4_AXON
+	tristate "OKL4 Microvisor Axon driver"
+	depends on VSERVICES_SUPPORT && OKL4_GUEST
+	default y
+	help
+	  This option adds support for Virtual Services sessions using an OKL4
+	  Microvisor Axon object as a transport.
+
+	  If this driver is to be used in a Cell that has multiple
+	  discontiguous regions in its physical memory pool, the
+	  CONFIG_DMA_CMA option must also be selected (or CONFIG_CMA
+	  in older kernels that do not have CONFIG_DMA_CMA).
+
+endmenu
diff --git a/drivers/vservices/transport/Makefile b/drivers/vservices/transport/Makefile
new file mode 100644
index 0000000..222fb51
--- /dev/null
+++ b/drivers/vservices/transport/Makefile
@@ -0,0 +1,5 @@
+ccflags-y += -Werror
+ccflags-$(CONFIG_VSERVICES_DEBUG) += -DDEBUG
+
+obj-$(CONFIG_VSERVICES_OKL4_AXON)	+= vtransport_axon.o
+vtransport_axon-objs = axon.o
diff --git a/drivers/vservices/transport/axon.c b/drivers/vservices/transport/axon.c
new file mode 100644
index 0000000..a140b4a
--- /dev/null
+++ b/drivers/vservices/transport/axon.c
@@ -0,0 +1,3573 @@
+/*
+ * drivers/vservices/transport/axon.c
+ *
+ * Copyright (c) 2015-2018 General Dynamics
+ * Copyright (c) 2015 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This is the OKL4 Virtual Services transport driver for OKL4 Microvisor
+ * Axons (virtual inter-Cell DMA engines).
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/jiffies.h>
+#include <linux/log2.h>
+#include <linux/version.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/dma-contiguous.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
+#include <asm/dma-contiguous.h>
+#endif
+#include <linux/vmalloc.h>
+#include <linux/mmzone.h>
+#include <asm-generic/okl4_virq.h>
+#include <asm/byteorder.h>
+
+#include <vservices/transport.h>
+#include <vservices/session.h>
+#include <vservices/service.h>
+
+#include <microvisor/microvisor.h>
+
+#include "../transport.h"
+#include "../session.h"
+#include "../debug.h"
+
+#define DRIVER_AUTHOR "Cog Systems Pty Ltd"
+#define DRIVER_DESC "OKL4 vServices Axon Transport Driver"
+#define DRIVER_NAME "vtransport_axon"
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0) || \
+	defined(CONFIG_NO_DEPRECATED_MEMORY_BARRIERS)
+#define smp_mb__before_atomic_dec smp_mb__before_atomic
+#define smp_mb__before_atomic_inc smp_mb__before_atomic
+#define smp_mb__after_atomic_dec smp_mb__after_atomic
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0)
+#define DMA_ATTRS unsigned long
+#else
+#define DMA_ATTRS struct dma_attrs *
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0) && \
+	!defined(CONFIG_CMA)
+static inline struct cma *dev_get_cma_area(struct device *dev)
+{
+	return NULL;
+}
+#endif
+
+static struct kmem_cache *mbuf_cache;
+
+struct child_device {
+	struct device *dev;
+	struct list_head list;
+};
+
+/* Number of services in the transport array to allocate at a time */
+#define SERVICES_ALLOC_CHUNK	16
+#define MSG_SEND_FREE_BUFS	VS_SERVICE_ID_RESERVED_1
+
+/* The maximum value we allow for the free_bufs_balance counter */
+#define MAX_BALANCE		1
+
+/*
+ * The free bufs quota must be enough to take free_bufs_balance from its
+ * minimum to its maximum.
+ */
+#define FREE_BUFS_QUOTA		(MAX_BALANCE * 2)
+
+/*
+ * The free bufs retry delay is the period in jiffies that we delay retrying
+ * after an out-of-memory condition when trying to send a free bufs message.
+ */
+#define FREE_BUFS_RETRY_DELAY	2
+
+/* The minimum values we permit for queue and message size. */
+#define MIN_QUEUE_SIZE		((size_t)4)
+#define MIN_MSG_SIZE		(32 - sizeof(vs_service_id_t))
+
+/*
+ * The maximum size for a batched receive. This should be larger than the
+ * maximum message size, and large enough to avoid excessive context switching
+ * overheads, yet small enough to avoid blocking the tasklet queue for too
+ * long.
+ */
+#define MAX_TRANSFER_CHUNK	65536
+
+#define INC_MOD(x, m) do {					\
+	x++;							\
+	if (x == m)						\
+		x = 0;						\
+} while (0)
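+
+/*
+ * Example usage (taken from the send path below):
+ *
+ *   INC_MOD(uptr, transport->tx->queues[0].entries);
+ *
+ * which advances a ring index and wraps it back to zero at the end of
+ * the Axon queue.
+ */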
+
+/* Local Axon cleanup workqueue */
+struct workqueue_struct *work_queue;
+
+/*
+ * True if there is only one physical segment being used for kernel memory
+ * allocations. If this is false, the device must have a usable CMA region.
+ */
+static bool okl4_single_physical_segment;
+
+/* OKL4 MMU capability. */
+static okl4_kcap_t okl4_mmu_cap;
+
+/*
+ * Per-service TX buffer allocation pool.
+ *
+ * We cannot use a normal DMA pool for TX buffers, because alloc_mbuf can be
+ * called with GFP_ATOMIC, and a normal DMA pool alloc will take pages from
+ * a global emergency pool if GFP_WAIT is not set. The emergency pool is not
+ * guaranteed to be in the same physical segment as this device's DMA region,
+ * so it might not be usable by the axon.
+ *
+ * Using a very simple allocator with preallocated memory also speeds up the
+ * TX path.
+ *
+ * RX buffers use a standard Linux DMA pool, shared between all services,
+ * rather than this struct. They are preallocated by definition, so the speed
+ * of the allocator doesn't matter much for them. Also, they're always
+ * allocated with GFP_KERNEL (which includes GFP_WAIT) so the normal DMA pool
+ * will use memory from the axon's contiguous region.
+ */
+struct vs_axon_tx_pool {
+	struct vs_transport_axon *transport;
+	struct kref kref;
+
+	void *base_vaddr;
+	dma_addr_t base_laddr;
+
+	unsigned alloc_order;
+	unsigned count;
+
+	struct work_struct free_work;
+	unsigned long alloc_bitmap[];
+};
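+
+/*
+ * Buffer carving sketch (for illustration; see __transport_alloc_mbuf):
+ * the pool is one coherent allocation of count << alloc_order bytes, each
+ * buffer is (1 << alloc_order) bytes, and buffer i lives at
+ *
+ *   vaddr = base_vaddr + (i << alloc_order);
+ *   laddr = base_laddr + (i << alloc_order);
+ *
+ * with bit i of alloc_bitmap marking whether that buffer is in use.
+ */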
+
+struct vs_axon_rx_freelist_entry {
+	struct list_head list;
+	dma_addr_t laddr;
+};
+
+/* Service info */
+struct vs_mv_service_info {
+	struct vs_service_device *service;
+
+	/* True if the session has started the service */
+	bool ready;
+
+	/* Number of send buffers we have allocated, in total. */
+	atomic_t send_inflight;
+
+	/*
+	 * Number of send buffers we have allocated but not yet sent.
+	 * This should always be zero if ready is false.
+	 */
+	atomic_t send_alloc;
+
+	/*
+	 * Number of receive buffers we have received and not yet freed.
+	 * This should always be zero if ready is false.
+	 */
+	atomic_t recv_inflight;
+
+	/*
+	 * Number of receive buffers we have freed, but not told the other end
+	 * about yet.
+	 *
+	 * The watermark is the maximum number of freed buffers we can
+	 * accumulate before we send a dummy message to the remote end to ack
+	 * them. This is used in situations where the protocol allows the remote
+	 * end to reach its send quota without guaranteeing a reply; the dummy
+	 * message lets it make progress even if our service driver doesn't send
+	 * an answer that we can piggy-back the acks on.
+	 */
+	atomic_t recv_freed;
+	unsigned int recv_freed_watermark;
+
+	/*
+	 * Number of buffers that have been left allocated after a reset. If
+	 * this count is nonzero, then the service has been disabled by the
+	 * session layer, and needs to be re-enabled when it reaches zero.
+	 */
+	atomic_t outstanding_frees;
+
+	/* TX allocation pool */
+	struct vs_axon_tx_pool *tx_pool;
+
+	/* RX allocation count */
+	unsigned rx_allocated;
+
+	/* Reference count for this info struct. */
+	struct kref kref;
+
+	/* RCU head for cleanup */
+	struct rcu_head rcu_head;
+};
+
+/*
+ * Transport readiness state machine
+ *
+ * This is similar to the service readiness state machine, but simpler,
+ * because there are fewer transition triggers.
+ *
+ * The states are:
+ * INIT: Initial state. This occurs transiently during probe.
+ * LOCAL_RESET: We have initiated a reset at this end, but the remote end has
+ * not yet acknowledged it. We will enter the RESET state on receiving
+ * acknowledgement.
+ * RESET: The transport is inactive at both ends, and the session layer has
+ * not yet told us to start activating.
+ * LOCAL_READY: The session layer has told us to start activating, and we
+ * have notified the remote end that we're ready.
+ * REMOTE_READY: The remote end has notified us that it is ready, but the
+ * local session layer hasn't decided to become ready yet.
+ * ACTIVE: Both ends are ready to communicate.
+ * SHUTDOWN: The transport is shutting down and should not become ready.
+ */
+enum vs_transport_readiness {
+	VS_TRANSPORT_INIT = 0,
+	VS_TRANSPORT_LOCAL_RESET,
+	VS_TRANSPORT_RESET,
+	VS_TRANSPORT_LOCAL_READY,
+	VS_TRANSPORT_REMOTE_READY,
+	VS_TRANSPORT_ACTIVE,
+	VS_TRANSPORT_SHUTDOWN,
+};
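+
+/*
+ * Rough transition summary (illustrative, derived from the state
+ * descriptions above):
+ *
+ *   INIT -> RESET                        transiently, during probe
+ *   RESET -> LOCAL_READY                 session asks us to activate first
+ *   RESET -> REMOTE_READY                remote end signals ready first
+ *   LOCAL_READY / REMOTE_READY -> ACTIVE once both ends are ready
+ *   any -> LOCAL_RESET -> RESET          local reset, then remote ack
+ *   any -> SHUTDOWN                      transport is being torn down
+ */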
+
+/*
+ * Transport reset / ready VIRQ payload bits
+ */
+enum vs_transport_reset_virq {
+	VS_TRANSPORT_VIRQ_RESET_REQ = (1 << 0),
+	VS_TRANSPORT_VIRQ_RESET_ACK = (1 << 1),
+	VS_TRANSPORT_VIRQ_READY = (1 << 2),
+};
+
+/*
+ * Internal definitions of the transport and message buffer structures.
+ */
+#define MAX_NOTIFICATION_LINES 16 /* Enough for 512 notifications each way */
+
+struct vs_transport_axon {
+	struct device *axon_dev;
+
+	struct okl4_axon_tx *tx;
+	struct okl4_axon_queue_entry *tx_descs;
+	struct vs_axon_tx_pool **tx_pools;
+	struct okl4_axon_rx *rx;
+	struct okl4_axon_queue_entry *rx_descs;
+	void **rx_ptrs;
+
+	dma_addr_t tx_phys, rx_phys;
+	size_t tx_size, rx_size;
+
+	okl4_kcap_t segment;
+	okl4_laddr_t segment_base;
+
+	okl4_kcap_t tx_cap, rx_cap, reset_cap;
+	unsigned int tx_irq, rx_irq, reset_irq;
+	okl4_interrupt_number_t reset_okl4_irq;
+
+	unsigned int notify_tx_nirqs;
+	okl4_kcap_t notify_cap[MAX_NOTIFICATION_LINES];
+	unsigned int notify_rx_nirqs;
+	unsigned int notify_irq[MAX_NOTIFICATION_LINES];
+
+	bool is_server;
+	size_t msg_size, queue_size;
+
+	/*
+	 * The handle to the device tree node for the virtual-session node
+	 * associated with the axon.
+	 */
+	struct device_node *of_node;
+
+	struct list_head child_dev_list;
+
+	/*
+	 * Hold queue and tx tasklet used to buffer and resend mbufs blocked
+	 * by a full outgoing axon queue, due to a slow receiver or a halted
+	 * axon.
+	 */
+	struct list_head tx_queue;
+	struct tasklet_struct tx_tasklet;
+	u32 tx_uptr_freed;
+
+	/*
+	 * The readiness state of the transport, and a spinlock protecting it.
+	 * Note that this is different to the session's readiness state
+	 * machine, though it has the same basic purpose.
+	 */
+	enum vs_transport_readiness readiness;
+	spinlock_t readiness_lock;
+
+	struct tasklet_struct rx_tasklet;
+	struct timer_list rx_retry_timer;
+	struct list_head rx_freelist;
+	u32 rx_alloc_extra;
+	struct dma_pool *rx_pool;
+	spinlock_t rx_alloc_lock;
+	u32 rx_uptr_allocated;
+
+	struct vs_session_device *session_dev;
+	struct vs_transport transport;
+
+	DECLARE_BITMAP(service_bitmap, VS_SERVICE_ID_BITMAP_BITS);
+
+	struct delayed_work free_bufs_work;
+
+	/*
+	 * Freed buffers messages balance counter. This counter is incremented
+	 * when we send a freed buffers message and decremented when we receive
+	 * one. If the balance is negative then we need to send a message
+	 * as an acknowledgement to the other end, even if there are no
+	 * freed buffers to acknowledge.
+	 */
+	atomic_t free_bufs_balance;
+
+	/*
+	 * Flag set when a service exceeds its freed buffers watermark,
+	 * telling free_bufs_work to send a message when the balance
+	 * counter is non-negative. This is ignored, and a message is
+	 * sent in any case, if the balance is negative.
+	 */
+	bool free_bufs_pending;
+
+	/* Pool for allocating outgoing free bufs messages */
+	struct vs_axon_tx_pool *free_bufs_pool;
+};
+
+#define to_vs_transport_axon(t) \
+	container_of(t, struct vs_transport_axon, transport)
+
+struct vs_mbuf_axon {
+	struct vs_mbuf base;
+	struct vs_transport_axon *owner;
+	dma_addr_t laddr;
+	struct vs_axon_tx_pool *pool;
+};
+
+#define to_vs_mbuf_axon(b) container_of(b, struct vs_mbuf_axon, base)
+
+/*
+ * Buffer allocation
+ *
+ * Buffers used by axons must be allocated within a single contiguous memory
+ * region, backed by a single OKL4 physical segment. This is similar to how
+ * the DMA allocator normally works, but we can't use the normal DMA allocator
+ * because the platform code will remap the allocated memory with caching
+ * disabled.
+ *
+ * We borrow the useful parts of the DMA allocator by providing our own DMA
+ * mapping ops which don't actually remap the memory.
+ */
+static void *axon_dma_alloc(struct device *dev, size_t size,
+		dma_addr_t *handle, gfp_t gfp, DMA_ATTRS attrs)
+{
+	unsigned long order;
+	size_t count;
+	struct page *page;
+	void *ptr;
+
+	*handle = DMA_ERROR_CODE;
+	size = PAGE_ALIGN(size);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)
+	if (!(gfp & __GFP_WAIT))
+#else
+	if (!(gfp & __GFP_RECLAIM))
+#endif
+		return NULL;
+
+	order = get_order(size);
+	count = size >> PAGE_SHIFT;
+
+	if (dev_get_cma_area(dev)) {
+		page = dma_alloc_from_contiguous(dev, count, order);
+
+		if (!page)
+			return NULL;
+	} else {
+		struct page *p, *e;
+		page = alloc_pages(gfp, order);
+
+		if (!page)
+			return NULL;
+
+		/* Split huge page and free any excess pages */
+		split_page(page, order);
+		for (p = page + count, e = page + (1 << order); p < e; p++)
+			__free_page(p);
+	}
+
+	if (PageHighMem(page)) {
+		struct vm_struct *area = get_vm_area(size, VM_USERMAP);
+		if (!area)
+			goto free_pages;
+		ptr = area->addr;
+		area->phys_addr = __pfn_to_phys(page_to_pfn(page));
+
+		if (ioremap_page_range((unsigned long)ptr,
+					(unsigned long)ptr + size,
+					area->phys_addr, PAGE_KERNEL)) {
+			vunmap(ptr);
+			goto free_pages;
+		}
+	} else {
+		ptr = page_address(page);
+	}
+
+	*handle = (dma_addr_t)page_to_pfn(page) << PAGE_SHIFT;
+
+	dev_dbg(dev, "dma_alloc: %#tx bytes at %pK (%#llx), %s cma, %s high\n",
+			size, ptr, (long long)*handle,
+			dev_get_cma_area(dev) ? "is" : "not",
+			PageHighMem(page) ? "is" : "not");
+
+	return ptr;
+
+free_pages:
+	if (dev_get_cma_area(dev)) {
+		dma_release_from_contiguous(dev, page, count);
+	} else {
+		struct page *e = page + count;
+
+		while (page < e) {
+			__free_page(page);
+			page++;
+		}
+	}
+
+	return NULL;
+}
+
+static void axon_dma_free(struct device *dev, size_t size, void *cpu_addr,
+		dma_addr_t handle, DMA_ATTRS attrs)
+{
+	struct page *page = pfn_to_page(handle >> PAGE_SHIFT);
+
+	size = PAGE_ALIGN(size);
+
+	if (PageHighMem(page)) {
+		unmap_kernel_range((unsigned long)cpu_addr, size);
+		vunmap(cpu_addr);
+	}
+
+	if (dev_get_cma_area(dev)) {
+		dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
+	} else {
+		struct page *e = page + (size >> PAGE_SHIFT);
+
+		while (page < e) {
+			__free_page(page);
+			page++;
+		}
+	}
+}
+
+struct dma_map_ops axon_dma_ops = {
+	.alloc		= axon_dma_alloc,
+	.free		= axon_dma_free,
+};
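+
+/*
+ * Note: axon_dma_ops only overrides .alloc/.free. These ops are presumably
+ * installed on the axon device elsewhere in this driver (e.g. something
+ * like set_dma_ops(axon_dev, &axon_dma_ops) at probe time -- the
+ * installation is not shown in this hunk), so that the coherent
+ * allocations used for the TX pools, the RX dma_pool and the queue
+ * descriptors come from the device's CMA area without being remapped
+ * uncached.
+ */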
+
+/*
+ * Quotas
+ * ------
+ *
+ * Each service has two quotas, one for send and one for receive. The
+ * send quota is incremented when we allocate an mbuf. The send quota
+ * is decremented when we receive a freed buffer ack from the remote
+ * end, either in the reserved bits of the service id or in a special
+ * free bufs message.
+ *
+ * The receive quota is incremented whenever we receive a message and
+ * decremented when we free the mbuf. Exceeding the receive quota means
+ * that something bad has happened, because the other end's send quota
+ * should have prevented it from sending the message; it indicates a
+ * driver bug, since the two ends disagree about the quotas. If this
+ * happens then a warning is printed and the offending service is reset.
+ */
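+
+/*
+ * Worked example (illustrative only): if a service's send quota is 4,
+ * alloc_mbuf can succeed four times, taking send_inflight from 0 to 4;
+ * a fifth allocation fails with -ENOBUFS. When the remote end frees two
+ * of those buffers and acks them (via reserved bits or a free bufs
+ * message), reduce_send_quota() drops send_inflight back to 2 and, if we
+ * were at the limit and the free came from the remote end, the service's
+ * tx_ready handler is allowed to run.
+ */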
+
+/*
+ * The base of the mbuf has the destination service id, but we pass the
+ * data pointer starting after the service id. The following helper
+ * functions are used to avoid ugly pointer arithmetic when handling
+ * mbufs.
+ */
+static size_t mbuf_real_size(struct vs_mbuf_axon *mbuf)
+{
+	return mbuf->base.size + sizeof(vs_service_id_t);
+}
+
+static void *mbuf_real_base(struct vs_mbuf_axon *mbuf)
+{
+	return mbuf->base.data - sizeof(vs_service_id_t);
+}
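+
+/*
+ * In-memory layout of a message buffer (sketch):
+ *
+ *   | vs_service_id_t dest id | payload ...                   |
+ *   ^                         ^
+ *   mbuf_real_base()          mbuf->base.data (length base.size)
+ *
+ * mbuf_real_size() is base.size plus the leading service id.
+ */
+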
+/*
+ * Get the service_id and reserved bits from a message buffer, then
+ * clear the reserved bits so the upper layers don't see them.
+ */
+vs_service_id_t
+transport_get_mbuf_service_id(struct vs_transport_axon *transport,
+		void *data, unsigned int *freed_acks)
+{
+	unsigned int reserved_bits;
+	vs_service_id_t id;
+
+	/* Get the real service id and reserved bits */
+	id = *(vs_service_id_t *)data;
+	reserved_bits = vs_get_service_id_reserved_bits(id);
+	id = vs_get_real_service_id(id);
+
+	/* Clear the reserved bits in the service id */
+	vs_set_service_id_reserved_bits(&id, 0);
+	if (freed_acks) {
+		*(vs_service_id_t *)data = id;
+		*freed_acks = reserved_bits;
+	}
+	return id;
+}
+
+static void
+__transport_get_service_info(struct vs_mv_service_info *service_info)
+{
+	kref_get(&service_info->kref);
+}
+
+static struct vs_mv_service_info *
+transport_get_service_info(struct vs_service_device *service)
+{
+	struct vs_mv_service_info *service_info;
+
+	rcu_read_lock();
+	service_info = rcu_dereference(service->transport_priv);
+	if (service_info)
+		__transport_get_service_info(service_info);
+	rcu_read_unlock();
+
+	return service_info;
+}
+
+static struct vs_mv_service_info *
+transport_get_service_id_info(struct vs_transport_axon *transport,
+		vs_service_id_t service_id)
+{
+	struct vs_service_device *service;
+	struct vs_mv_service_info *service_info;
+
+	service = vs_session_get_service(transport->session_dev, service_id);
+	if (!service)
+		return NULL;
+
+	service_info = transport_get_service_info(service);
+
+	vs_put_service(service);
+	return service_info;
+}
+
+static void transport_info_free(struct rcu_head *rcu_head)
+{
+	struct vs_mv_service_info *service_info =
+		container_of(rcu_head, struct vs_mv_service_info, rcu_head);
+
+	vs_put_service(service_info->service);
+	kfree(service_info);
+}
+
+static void transport_info_release(struct kref *kref)
+{
+	struct vs_mv_service_info *service_info =
+		container_of(kref, struct vs_mv_service_info, kref);
+
+	call_rcu(&service_info->rcu_head, transport_info_free);
+}
+
+static void transport_put_service_info(struct vs_mv_service_info *service_info)
+{
+	kref_put(&service_info->kref, transport_info_release);
+}
+
+static bool transport_axon_reset(struct vs_transport_axon *transport);
+
+static void transport_fatal_error(struct vs_transport_axon *transport,
+		const char *msg)
+{
+	dev_err(transport->axon_dev, "Fatal transport error (%s); resetting\n",
+			msg);
+#ifdef DEBUG
+	dump_stack();
+#endif
+	transport_axon_reset(transport);
+}
+
+static unsigned int reduce_send_quota(struct vs_transport_axon *transport,
+		struct vs_mv_service_info *service_info, unsigned int count,
+		bool allow_tx_ready)
+{
+	int new_inflight, send_alloc;
+	bool was_over_quota, is_over_quota;
+
+	/* FIXME: Redmine issue #1303 - philip. */
+	spin_lock_irq(&transport->readiness_lock);
+	/*
+	 * We read the current send_alloc for error checking *before*
+	 * decrementing send_inflight. This avoids any false positives
+	 * due to send_alloc being incremented by a concurrent alloc_mbuf.
+	 *
+	 * Note that there is an implicit smp_mb() before atomic_sub_return(),
+	 * matching the explicit one in alloc_mbuf.
+	 */
+	send_alloc = atomic_read(&service_info->send_alloc);
+	new_inflight = atomic_sub_return(count, &service_info->send_inflight);
+
+	spin_unlock_irq(&transport->readiness_lock);
+	if (WARN_ON(new_inflight < send_alloc)) {
+		dev_err(transport->axon_dev,
+				"inflight sent messages for service %d is less than the number of allocated messages (%d < %d, was reduced by %d)\n",
+				service_info->service->id, new_inflight,
+				send_alloc, count);
+		transport_fatal_error(transport, "sent msg count underrun");
+		return 0;
+	}
+
+	was_over_quota = (new_inflight + count >=
+			service_info->service->send_quota);
+	is_over_quota = (new_inflight > service_info->service->send_quota);
+
+	vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+			transport->axon_dev,
+			"Service %d quota %d -> %d (over_quota: %d -> %d)\n",
+			service_info->service->id, new_inflight + count,
+			new_inflight, was_over_quota, is_over_quota);
+
+	/*
+	 * Notify the service that a buffer has been freed. We call tx_ready
+	 * if this is a notification from the remote end (i.e. not an unsent
+	 * buffer) and the quota has just dropped below the maximum.
+	 */
+	vs_session_quota_available(transport->session_dev,
+			service_info->service->id, count,
+			!is_over_quota && was_over_quota && allow_tx_ready);
+
+	return count;
+}
+
+static void __transport_tx_pool_free(struct vs_axon_tx_pool *pool,
+		dma_addr_t laddr);
+
+static void
+__transport_tx_cleanup(struct vs_transport_axon *transport)
+{
+	u32 uptr;
+	struct okl4_axon_queue_entry *desc;
+
+	lockdep_assert_held(&transport->readiness_lock);
+
+	uptr = transport->tx_uptr_freed;
+	desc = &transport->tx_descs[uptr];
+
+	while (!okl4_axon_data_info_getpending(&desc->info)) {
+		if (!transport->tx_pools[uptr])
+			break;
+
+		__transport_tx_pool_free(transport->tx_pools[uptr],
+				okl4_axon_data_info_getladdr(&desc->info));
+		transport->tx_pools[uptr] = NULL;
+
+		INC_MOD(uptr, transport->tx->queues[0].entries);
+		desc = &transport->tx_descs[uptr];
+		transport->tx_uptr_freed = uptr;
+	}
+}
+
+static void
+transport_axon_free_tx_pool(struct work_struct *work)
+{
+	struct vs_axon_tx_pool *pool = container_of(work,
+			struct vs_axon_tx_pool, free_work);
+	struct vs_transport_axon *transport = pool->transport;
+
+	dmam_free_coherent(transport->axon_dev,
+			pool->count << pool->alloc_order,
+			pool->base_vaddr, pool->base_laddr);
+	devm_kfree(transport->axon_dev, pool);
+}
+
+static void
+transport_axon_queue_free_tx_pool(struct kref *kref)
+{
+	struct vs_axon_tx_pool *pool = container_of(kref,
+			struct vs_axon_tx_pool, kref);
+
+	/*
+	 * Put the task on the axon local work queue for running in
+	 * a context where IRQ is enabled.
+	 */
+	INIT_WORK(&pool->free_work, transport_axon_free_tx_pool);
+	queue_work(work_queue, &pool->free_work);
+}
+
+static void
+transport_axon_put_tx_pool(struct vs_axon_tx_pool *pool)
+{
+	kref_put(&pool->kref, transport_axon_queue_free_tx_pool);
+}
+
+/* Low-level tx buffer allocation, without quota tracking. */
+static struct vs_mbuf_axon *
+__transport_alloc_mbuf(struct vs_transport_axon *transport,
+		vs_service_id_t service_id, struct vs_axon_tx_pool *pool,
+		size_t size, gfp_t gfp_flags)
+{
+	size_t real_size = size + sizeof(vs_service_id_t);
+	struct vs_mbuf_axon *mbuf;
+	unsigned index;
+
+	if (WARN_ON(real_size > (1 << pool->alloc_order))) {
+		dev_err(transport->axon_dev, "Message too big (%zu > %zu)\n",
+				real_size, (size_t)1 << pool->alloc_order);
+		goto fail_message_size;
+	}
+
+	kref_get(&pool->kref);
+
+	do {
+		index = find_first_zero_bit(pool->alloc_bitmap, pool->count);
+		if (unlikely(index >= pool->count)) {
+			/*
+			 * No buffers left. This can't be an out-of-quota
+			 * situation, because we've already checked the quota;
+			 * it must be because there's a buffer left over in
+			 * the tx queue. Clean out the tx queue and retry.
+			 */
+			spin_lock_irq(&transport->readiness_lock);
+			__transport_tx_cleanup(transport);
+			spin_unlock_irq(&transport->readiness_lock);
+
+			index = find_first_zero_bit(pool->alloc_bitmap,
+					pool->count);
+		}
+		if (unlikely(index >= pool->count))
+			goto fail_buffer_alloc;
+	} while (unlikely(test_and_set_bit_lock(index, pool->alloc_bitmap)));
+
+	mbuf = kmem_cache_alloc(mbuf_cache, gfp_flags & ~GFP_ZONEMASK);
+	if (!mbuf)
+		goto fail_mbuf_alloc;
+
+	mbuf->base.is_recv = false;
+	mbuf->base.data = pool->base_vaddr + (index << pool->alloc_order);
+	mbuf->base.size = size;
+	mbuf->owner = transport;
+	mbuf->laddr = pool->base_laddr + (index << pool->alloc_order);
+	mbuf->pool = pool;
+
+	/*
+	 * We put the destination service id in the mbuf, but increment the
+	 * data pointer past it so the receiver doesn't always need to skip
+	 * the service id.
+	 */
+	*(vs_service_id_t *)mbuf->base.data = service_id;
+	mbuf->base.data += sizeof(vs_service_id_t);
+
+	return mbuf;
+
+fail_mbuf_alloc:
+	clear_bit_unlock(index, pool->alloc_bitmap);
+fail_buffer_alloc:
+	transport_axon_put_tx_pool(pool);
+fail_message_size:
+	return NULL;
+}
+
+/* Allocate a tx buffer for a specified service. */
+static struct vs_mbuf *transport_alloc_mbuf(struct vs_transport *_transport,
+		struct vs_service_device *service, size_t size, gfp_t gfp_flags)
+{
+	struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+	size_t real_size = size + sizeof(vs_service_id_t);
+	struct vs_mv_service_info *service_info = NULL;
+	struct vs_mbuf_axon *mbuf;
+	vs_service_id_t service_id = service->id;
+
+	if (real_size > transport->msg_size) {
+		dev_err(transport->axon_dev, "Message too big (%zu > %zu)\n",
+				real_size, transport->msg_size);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (WARN_ON(service_id == MSG_SEND_FREE_BUFS))
+		return ERR_PTR(-ENXIO);
+
+	service_info = transport_get_service_info(service);
+	if (WARN_ON(!service_info))
+		return ERR_PTR(-EINVAL);
+
+	if (!service_info->tx_pool) {
+		transport_put_service_info(service_info);
+		return ERR_PTR(-ECONNRESET);
+	}
+
+	if (!atomic_add_unless(&service_info->send_inflight, 1,
+			service_info->service->send_quota)) {
+		/* Service has reached its quota */
+		vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+				transport->axon_dev,
+				"Service %d is at max send quota %d\n",
+				service_id, service_info->service->send_quota);
+		transport_put_service_info(service_info);
+		return ERR_PTR(-ENOBUFS);
+	}
+
+	/*
+	 * Increment the count of allocated but unsent mbufs. This is done
+	 * *after* the send_inflight increment (with a barrier to enforce
+	 * ordering) to ensure that send_inflight is never less than
+	 * send_alloc - see reduce_send_quota().
+	 */
+	smp_mb__before_atomic_inc();
+	atomic_inc(&service_info->send_alloc);
+
+	mbuf = __transport_alloc_mbuf(transport, service_id,
+			service_info->tx_pool, size, gfp_flags);
+	if (!mbuf) {
+		/*
+		 * Failed to allocate a buffer - decrement our quota back to
+		 * where it was.
+		 */
+		atomic_dec(&service_info->send_alloc);
+		smp_mb__after_atomic_dec();
+		atomic_dec(&service_info->send_inflight);
+
+		transport_put_service_info(service_info);
+
+		return ERR_PTR(-ENOMEM);
+	}
+
+	transport_put_service_info(service_info);
+
+	return &mbuf->base;
+}
+
+static void transport_free_sent_mbuf(struct vs_transport_axon *transport,
+		struct vs_mbuf_axon *mbuf)
+{
+	kmem_cache_free(mbuf_cache, mbuf);
+}
+
+static void __transport_tx_pool_free(struct vs_axon_tx_pool *pool,
+		dma_addr_t laddr)
+{
+	unsigned index = (laddr - pool->base_laddr) >> pool->alloc_order;
+
+	if (WARN_ON(index >= pool->count)) {
+		printk(KERN_DEBUG "free %#llx base %#llx order %d count %d\n",
+				(long long)laddr, (long long)pool->base_laddr,
+				pool->alloc_order, pool->count);
+		return;
+	}
+
+	clear_bit_unlock(index, pool->alloc_bitmap);
+	transport_axon_put_tx_pool(pool);
+}
+
+static int transport_rx_queue_buffer(struct vs_transport_axon *transport,
+		void *ptr, dma_addr_t laddr);
+
+static void transport_rx_recycle(struct vs_transport_axon *transport,
+		struct vs_mbuf_axon *mbuf)
+{
+	void *data = mbuf_real_base(mbuf);
+	dma_addr_t laddr = mbuf->laddr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&transport->rx_alloc_lock, flags);
+
+	if (transport->rx_alloc_extra) {
+		transport->rx_alloc_extra--;
+		dma_pool_free(transport->rx_pool, data, laddr);
+	} else if (transport_rx_queue_buffer(transport, data, laddr) < 0) {
+		struct vs_axon_rx_freelist_entry *buf = data;
+		buf->laddr = laddr;
+		list_add_tail(&buf->list, &transport->rx_freelist);
+		tasklet_schedule(&transport->rx_tasklet);
+	} else {
+		tasklet_schedule(&transport->rx_tasklet);
+	}
+
+	spin_unlock_irqrestore(&transport->rx_alloc_lock, flags);
+}
+
+static void transport_free_mbuf_pools(struct vs_transport_axon *transport,
+		struct vs_service_device *service,
+		struct vs_mv_service_info *service_info)
+{
+	/*
+	 * Free the TX allocation pool. This will also free any buffer
+	 * memory allocated from the pool, so it is essential that
+	 * this happens only after we have successfully freed all
+	 * mbufs.
+	 *
+	 * Note that the pool will not exist if the core client is reset
+	 * before it receives a startup message.
+	 */
+	if (!IS_ERR_OR_NULL(service_info->tx_pool))
+		transport_axon_put_tx_pool(service_info->tx_pool);
+	service_info->tx_pool = NULL;
+
+	/* Mark the service's preallocated RX buffers as extra. */
+	spin_lock_irq(&transport->rx_alloc_lock);
+	transport->rx_alloc_extra += service_info->rx_allocated;
+	service_info->rx_allocated = 0;
+	spin_unlock_irq(&transport->rx_alloc_lock);
+}
+
+/* Low-level tx or rx buffer free, with no quota tracking */
+static void __transport_free_mbuf(struct vs_transport_axon *transport,
+		struct vs_mbuf_axon *mbuf, bool is_rx)
+{
+	if (is_rx) {
+		transport_rx_recycle(transport, mbuf);
+	} else {
+		__transport_tx_pool_free(mbuf->pool, mbuf->laddr);
+	}
+
+	kmem_cache_free(mbuf_cache, mbuf);
+}
+
+static void transport_free_mbuf(struct vs_transport *_transport,
+		struct vs_service_device *service, struct vs_mbuf *_mbuf)
+{
+	struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+	struct vs_mbuf_axon *mbuf = to_vs_mbuf_axon(_mbuf);
+	struct vs_mv_service_info *service_info = NULL;
+	void *data = mbuf_real_base(mbuf);
+	vs_service_id_t service_id __maybe_unused =
+		transport_get_mbuf_service_id(transport, data, NULL);
+	bool is_recv = mbuf->base.is_recv;
+
+	WARN_ON(!service);
+	service_info = transport_get_service_info(service);
+
+	__transport_free_mbuf(transport, mbuf, is_recv);
+
+	/*
+	 * If this message was left over from a service that has already been
+	 * deleted, we don't need to do any quota accounting.
+	 */
+	if (!service_info)
+		return;
+
+	if (unlikely(atomic_read(&service_info->outstanding_frees))) {
+		if (atomic_dec_and_test(&service_info->outstanding_frees)) {
+			dev_dbg(transport->axon_dev,
+				"service %d all outstanding frees done\n",
+				service->id);
+			transport_free_mbuf_pools(transport, service,
+					service_info);
+			vs_service_enable(service);
+		} else {
+			dev_dbg(transport->axon_dev,
+				"service %d outstanding frees -> %d\n",
+				service->id, atomic_read(
+					&service_info->outstanding_frees));
+		}
+	} else if (is_recv) {
+		smp_mb__before_atomic_dec();
+		atomic_dec(&service_info->recv_inflight);
+		if (atomic_inc_return(&service_info->recv_freed) >=
+				service_info->recv_freed_watermark) {
+			transport->free_bufs_pending = true;
+			schedule_delayed_work(&transport->free_bufs_work, 0);
+		}
+
+		vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+				transport->axon_dev,
+				"Freed recv buffer for service %d rq=%d/%d, freed=%d (watermark = %d)\n",
+				service_id,
+				atomic_read(&service_info->recv_inflight),
+				service_info->service->recv_quota,
+				atomic_read(&service_info->recv_freed),
+				service_info->recv_freed_watermark);
+	} else {
+		/*
+		 * We are freeing a message buffer that we allocated. This
+		 * usually happens on error paths in application drivers if
+		 * we allocated a buffer but failed to send it. In this case
+		 * we need to decrement our own send quota since we didn't
+		 * send anything.
+		 */
+		vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+				transport->axon_dev,
+				"Freeing send buffer for service %d, send quota = %d\n",
+				service_id, atomic_read(&service_info->send_inflight));
+
+		smp_mb__before_atomic_dec();
+		atomic_dec(&service_info->send_alloc);
+
+		/*
+		 * We don't allow the tx_ready handler to run when we are
+		 * freeing an mbuf that we allocated.
+		 */
+		reduce_send_quota(transport, service_info, 1, false);
+	}
+
+	transport_put_service_info(service_info);
+}
+
+static size_t transport_mbuf_size(struct vs_mbuf *_mbuf)
+{
+	struct vs_mbuf_axon *mbuf = to_vs_mbuf_axon(_mbuf);
+
+	return mbuf_real_size(mbuf);
+}
+
+static size_t transport_max_mbuf_size(struct vs_transport *_transport)
+{
+	struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+
+	return transport->msg_size - sizeof(vs_service_id_t);
+}
+
+static int okl4_error_to_errno(okl4_error_t err)
+{
+	switch (err) {
+	case OKL4_OK:
+		return 0;
+	case OKL4_ERROR_AXON_QUEUE_NOT_MAPPED:
+		/* Axon has been reset locally */
+		return -ECONNRESET;
+	case OKL4_ERROR_AXON_QUEUE_NOT_READY:
+		/* No message buffers in the queue. */
+		return -ENOBUFS;
+	case OKL4_ERROR_AXON_INVALID_OFFSET:
+	case OKL4_ERROR_AXON_AREA_TOO_BIG:
+		/* Buffer address is bad */
+		return -EFAULT;
+	case OKL4_ERROR_AXON_BAD_MESSAGE_SIZE:
+	case OKL4_ERROR_AXON_TRANSFER_LIMIT_EXCEEDED:
+		/* One of the Axon's message size limits has been exceeded */
+		return -EMSGSIZE;
+	default:
+		/* Miscellaneous failure, probably a bad cap */
+		return -EIO;
+	}
+}
+
+static void queue_tx_mbuf(struct vs_mbuf_axon *mbuf, struct vs_transport_axon *priv,
+		vs_service_id_t service_id)
+{
+	list_add_tail(&mbuf->base.queue, &priv->tx_queue);
+}
+
+static void free_tx_mbufs(struct vs_transport_axon *priv)
+{
+	struct vs_mbuf_axon *child, *tmp;
+
+	list_for_each_entry_safe(child, tmp, &priv->tx_queue, base.queue) {
+		list_del(&child->base.queue);
+		__transport_free_mbuf(priv, child, false);
+	}
+}
+
+static int __transport_flush(struct vs_transport_axon *transport)
+{
+	_okl4_sys_axon_trigger_send(transport->tx_cap);
+	return 0;
+}
+
+static int transport_flush(struct vs_transport *_transport,
+		struct vs_service_device *service)
+{
+	struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+
+	return __transport_flush(transport);
+}
+
+/*
+ * Low-level transport message send function.
+ *
+ * The caller must hold the transport->readiness_lock, and is responsible for
+ * freeing the mbuf on successful send (use transport_free_sent_mbuf). The
+ * mbuf should _not_ be freed if this function fails. The Virtual Service
+ * driver is responsible for freeing the mbuf in the failure case.
+ */
+static int __transport_send(struct vs_transport_axon *transport,
+		struct vs_mbuf_axon *mbuf, vs_service_id_t service_id,
+		unsigned long flags)
+{
+	u32 uptr;
+	struct okl4_axon_queue_entry *desc;
+	struct vs_axon_tx_pool *old_pool;
+	dma_addr_t old_laddr;
+
+	lockdep_assert_held(&transport->readiness_lock);
+
+	vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+			transport->axon_dev,
+			"send %zu bytes to service %d\n",
+			mbuf->base.size, service_id);
+	vs_debug_dump_mbuf(transport->session_dev, &mbuf->base);
+
+	uptr = ACCESS_ONCE(transport->tx->queues[0].uptr);
+	desc = &transport->tx_descs[uptr];
+
+	/* Is the descriptor ready to use? */
+	if (okl4_axon_data_info_getpending(&desc->info))
+		return -ENOSPC;
+	mb();
+
+	/* The descriptor is ours; save its old state and increment the uptr */
+	old_pool = transport->tx_pools[uptr];
+	if (old_pool != NULL)
+		old_laddr = okl4_axon_data_info_getladdr(&desc->info);
+	transport->tx_pools[uptr] = mbuf->pool;
+
+	INC_MOD(uptr, transport->tx->queues[0].entries);
+	ACCESS_ONCE(transport->tx->queues[0].uptr) = uptr;
+
+	/* Set up the descriptor */
+	desc->data_size = mbuf_real_size(mbuf);
+	okl4_axon_data_info_setladdr(&desc->info, mbuf->laddr);
+
+	/* Message is ready to go */
+	wmb();
+	okl4_axon_data_info_setpending(&desc->info, true);
+
+	if (flags & VS_TRANSPORT_SEND_FLAGS_MORE) {
+		/*
+		 * This is a batched message, so we normally don't flush,
+		 * unless we've filled the queue completely.
+		 *
+		 * Races on the queue descriptor don't matter here, because
+		 * this is only an optimisation; the service should do an
+		 * explicit flush when it finishes the batch anyway.
+		 */
+		desc = &transport->tx_descs[uptr];
+		if (okl4_axon_data_info_getpending(&desc->info))
+			__transport_flush(transport);
+	} else {
+		__transport_flush(transport);
+	}
+
+	/* Free any buffer previously in the descriptor */
+	if (old_pool != NULL) {
+		u32 uptr_freed = transport->tx_uptr_freed;
+		INC_MOD(uptr_freed, transport->tx->queues[0].entries);
+		WARN_ON(uptr_freed != uptr);
+		__transport_tx_pool_free(old_pool, old_laddr);
+		transport->tx_uptr_freed = uptr_freed;
+	}
+
+	return 0;
+}
+
+static int transport_send_might_queue(struct vs_transport_axon *transport,
+		struct vs_mbuf_axon *mbuf, vs_service_id_t service_id,
+		unsigned long flags, bool *queued)
+{
+	int ret = 0;
+
+	lockdep_assert_held(&transport->readiness_lock);
+	*queued = false;
+
+	if (transport->readiness != VS_TRANSPORT_ACTIVE)
+		return -ECONNRESET;
+
+	if (!list_empty(&transport->tx_queue)) {
+		*queued = true;
+	} else {
+		ret = __transport_send(transport, mbuf, service_id, flags);
+		if (ret == -ENOSPC) {
+			*queued = true;
+			ret = 0;
+		}
+	}
+
+	if (*queued)
+		queue_tx_mbuf(mbuf, transport, service_id);
+
+	return ret;
+}
+
+static int transport_send(struct vs_transport *_transport,
+		struct vs_service_device *service, struct vs_mbuf *_mbuf,
+		unsigned long flags)
+{
+	struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+	struct vs_mbuf_axon *mbuf = to_vs_mbuf_axon(_mbuf);
+	struct vs_mv_service_info *service_info;
+	vs_service_id_t service_id;
+	int recv_freed, freed_acks;
+	bool queued;
+	int err;
+	unsigned long irqflags;
+
+	if (WARN_ON(!transport || !mbuf || mbuf->owner != transport))
+		return -EINVAL;
+
+	service_id = transport_get_mbuf_service_id(transport,
+			mbuf_real_base(mbuf), NULL);
+
+	if (WARN_ON(service_id != service->id))
+		return -EINVAL;
+
+	service_info = transport_get_service_info(service);
+	if (!service_info)
+		return -EINVAL;
+
+	if (mbuf->base.is_recv) {
+		/*
+		 * This message buffer was allocated for receive. We don't
+		 * allow receive message buffers to be reused for sending
+		 * because it makes our quotas inconsistent.
+		 */
+		dev_err(&service_info->service->dev,
+				"Attempted to send a received message buffer\n");
+		transport_put_service_info(service_info);
+		return -EINVAL;
+	}
+
+	if (!service_info->ready) {
+		transport_put_service_info(service_info);
+		return -ECOMM;
+	}
+
+	/*
+	 * Set the message's service id reserved bits to the number of buffers
+	 * we have freed. We can only ack 2 ^ VS_SERVICE_ID_RESERVED_BITS - 1
+	 * buffers in one message.
+	 */
+	do {
+		recv_freed = atomic_read(&service_info->recv_freed);
+		freed_acks = min_t(int, recv_freed,
+				VS_SERVICE_ID_TRANSPORT_MASK);
+	} while (recv_freed != atomic_cmpxchg(&service_info->recv_freed,
+				recv_freed, recv_freed - freed_acks));
+
+	service_id = service_info->service->id;
+	vs_set_service_id_reserved_bits(&service_id, freed_acks);
+	*(vs_service_id_t *)mbuf_real_base(mbuf) = service_id;
+
+	spin_lock_irqsave(&transport->readiness_lock, irqflags);
+	err = transport_send_might_queue(transport, mbuf,
+			service_info->service->id, flags, &queued);
+	if (err) {
+		/* We failed to send, so revert the freed acks */
+		if (atomic_add_return(freed_acks,
+				&service_info->recv_freed) >=
+				service_info->recv_freed_watermark) {
+			transport->free_bufs_pending = true;
+			schedule_delayed_work(&transport->free_bufs_work, 0);
+		}
+		transport_put_service_info(service_info);
+		spin_unlock_irqrestore(&transport->readiness_lock, irqflags);
+		return err;
+	}
+
+	atomic_dec(&service_info->send_alloc);
+
+	if (queued) {
+		transport_put_service_info(service_info);
+		spin_unlock_irqrestore(&transport->readiness_lock, irqflags);
+		return 0;
+	}
+
+	/*
+	 * The mbuf was sent successfully. We can free it locally since it is
+	 * now owned by the remote end.
+	 */
+	transport_free_sent_mbuf(transport, mbuf);
+
+	vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+			transport->axon_dev,
+			"Send okay: service %d (0x%.2x) sq=%d/%d, alloc--=%d, rq=%d/%d, freed=%d/%d, bc=%d\n",
+			service_info->service->id, service_id,
+			atomic_read(&service_info->send_inflight),
+			service_info->service->send_quota,
+			atomic_read(&service_info->send_alloc),
+			atomic_read(&service_info->recv_inflight),
+			service_info->service->recv_quota, freed_acks,
+			atomic_read(&service_info->recv_freed),
+			atomic_read(&transport->free_bufs_balance));
+
+	transport_put_service_info(service_info);
+	spin_unlock_irqrestore(&transport->readiness_lock, irqflags);
+
+	return 0;
+}
+
+static void transport_free_bufs_work(struct work_struct *work)
+{
+	struct delayed_work *dwork = to_delayed_work(work);
+	struct vs_transport_axon *transport = container_of(dwork,
+			struct vs_transport_axon, free_bufs_work);
+	struct vs_mbuf_axon *mbuf;
+	int i, err, count = 0, old_balance;
+	bool queued;
+	size_t size;
+	u16 *p;
+
+	/*
+	 * Atomically decide whether to send a message, and increment
+	 * the balance if we are going to.
+	 *
+	 * We don't need barriers before these reads because they're
+	 * implicit in the work scheduling.
+	 */
+	do {
+		old_balance = atomic_read(&transport->free_bufs_balance);
+
+		/*
+		 * We only try to send if the balance is negative,
+		 * or if we have been triggered by going over a
+		 * watermark.
+		 */
+		if (old_balance >= 0 && !transport->free_bufs_pending)
+			return;
+
+		/*
+		 * If we've hit the max balance, we can't send. The work
+		 * will be rescheduled next time the balance is
+		 * decremented, if free_bufs_pending is true.
+		 */
+		if (old_balance >= MAX_BALANCE)
+			return;
+
+	} while (old_balance != atomic_cmpxchg(&transport->free_bufs_balance,
+			old_balance, old_balance + 1));
+
+	/* Try to allocate a message buffer. */
+	mbuf = __transport_alloc_mbuf(transport, MSG_SEND_FREE_BUFS,
+			transport->free_bufs_pool,
+			transport->msg_size - sizeof(vs_service_id_t),
+			GFP_KERNEL | __GFP_NOWARN);
+	if (!mbuf) {
+		/* Out of memory at the moment; retry later. */
+		atomic_dec(&transport->free_bufs_balance);
+		schedule_delayed_work(dwork, FREE_BUFS_RETRY_DELAY);
+		return;
+	}
+
+	/*
+	 * Clear free_bufs_pending, because we are going to try to send.  We
+	 * need a write barrier afterwards to guarantee that this write is
+	 * ordered before any writes to the recv_freed counts, and therefore
+	 * before any remote free_bufs_pending = true when a service goes
+	 * over its watermark right after we inspect it.
+	 *
+	 * The matching barrier is implicit in the atomic_inc_return in
+	 * transport_free_mbuf().
+	 */
+	transport->free_bufs_pending = false;
+	smp_wmb();
+
+	/*
+	 * Fill in the buffer. Message format is:
+	 *
+	 *   u16: Number of services
+	 *
+	 *   For each service:
+	 *       u16: Service ID
+	 *       u16: Number of freed buffers
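+	 *
+	 * For example, a report for two services is five u16s in total:
+	 * { 2, id0, n0, id1, n1 }, i.e. sizeof(u16) * (2 * count + 1) = 10
+	 * bytes, matching the size fix-up below (id0/n0 are placeholders).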
+	 */
+	p = mbuf->base.data;
+	*(p++) = 0;
+
+	for_each_set_bit(i, transport->service_bitmap,
+			VS_SERVICE_ID_BITMAP_BITS) {
+		struct vs_mv_service_info *service_info;
+		int recv_freed;
+		u16 freed_acks;
+
+		service_info = transport_get_service_id_info(transport, i);
+		if (!service_info)
+			continue;
+
+		/*
+		 * Don't let the message exceed the maximum size for the
+		 * transport.
+		 */
+		size = sizeof(vs_service_id_t) + sizeof(u16) +
+				(count * (2 * sizeof(u16)));
+		if (size > transport->msg_size) {
+			/* FIXME: Jira ticket SDK-3131 - ryanm. */
+			transport_put_service_info(service_info);
+			transport->free_bufs_pending = true;
+			break;
+		}
+
+		/*
+		 * We decrement each service's quota immediately by up to
+		 * USHRT_MAX. If we subsequently fail to send the message then
+		 * we return the count to what it was previously.
+		 */
+		do {
+			recv_freed = atomic_read(&service_info->recv_freed);
+			freed_acks = min_t(int, USHRT_MAX, recv_freed);
+		} while (recv_freed != atomic_cmpxchg(
+				&service_info->recv_freed,
+				recv_freed, recv_freed - freed_acks));
+
+		if (freed_acks) {
+			if (freed_acks < recv_freed)
+				transport->free_bufs_pending = true;
+
+			*(p++) = service_info->service->id;
+			*(p++) = freed_acks;
+			count++;
+
+			vs_dev_debug(VS_DEBUG_TRANSPORT,
+					transport->session_dev,
+					transport->axon_dev,
+					"  [%.2d] Freed %.2d buffers\n",
+					service_info->service->id,
+					freed_acks);
+		} else {
+			vs_dev_debug(VS_DEBUG_TRANSPORT,
+					transport->session_dev,
+					transport->axon_dev,
+					"  [%.2d] No buffers to free\n",
+					service_info->service->id);
+		}
+
+		transport_put_service_info(service_info);
+	}
+
+	if (transport->free_bufs_pending)
+		schedule_delayed_work(dwork, 0);
+
+	if (count == 0 && old_balance >= 0) {
+		/*
+		 * We are sending a new free bufs message, but we have no
+		 * freed buffers to tell the other end about. We don't send
+		 * an empty message unless the pre-increment balance was
+		 * negative (in which case we need to ack a remote free_bufs).
+		 *
+		 * Note that nobody else can increase the balance, so we only
+		 * need to check for a non-negative balance once before
+		 * decrementing. However, if the incoming free-bufs handler
+		 * concurrently decrements, the balance may become negative,
+		 * in which case we reschedule ourselves immediately to send
+		 * the ack.
+		 */
+		if (atomic_dec_return(&transport->free_bufs_balance) < 0)
+			schedule_delayed_work(dwork, 0);
+
+		__transport_free_mbuf(transport, mbuf, false);
+
+		vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+				transport->axon_dev,
+				"No services had buffers to free\n");
+
+		return;
+	}
+
+	vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+			transport->axon_dev,
+			"Sending free bufs message for %d services\n", count);
+
+	/* Fix up the message size */
+	p = mbuf->base.data;
+	*p = count;
+	mbuf->base.size = sizeof(u16) * ((count * 2) + 1);
+
+	spin_lock_irq(&transport->readiness_lock);
+	err = transport_send_might_queue(transport, mbuf, MSG_SEND_FREE_BUFS,
+			0, &queued);
+	if (err) {
+		spin_unlock_irq(&transport->readiness_lock);
+		goto fail;
+	}
+
+	/* FIXME: Jira ticket SDK-4675 - ryanm. */
+	if (!queued) {
+		/*
+		 * The mbuf was sent successfully. We can free it locally
+		 * since it is now owned by the remote end.
+		 */
+		transport_free_sent_mbuf(transport, mbuf);
+	}
+	spin_unlock_irq(&transport->readiness_lock);
+
+	return;
+
+fail:
+	dev_err(transport->axon_dev,
+			"Failed to send free bufs message: %d\n", err);
+	transport_fatal_error(transport, "free bufs send failed");
+}
+
+int transport_notify(struct vs_transport *_transport,
+		struct vs_service_device *service, unsigned long bits)
+{
+	struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+	unsigned long bit_offset, bitmask, word;
+	int first_set_bit, spilled_bits;
+
+	BUG_ON(!transport);
+
+	if (!bits)
+		return -EINVAL;
+
+	/* Check that the service isn't trying to raise bits it doesn't own */
+	if (bits & ~((1UL << service->notify_send_bits) - 1))
+		return -EINVAL;
+
+	bit_offset = service->notify_send_offset;
+	word = BIT_WORD(bit_offset);
+	bitmask = bits << (bit_offset % BITS_PER_LONG);
+
+	vs_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+			"Sending notification %ld to service id %d\n", bitmask,
+			service->id);
+
+	_okl4_sys_vinterrupt_raise(transport->notify_cap[word], bitmask);
+
+	/*
+	 * The bit range may spill into the next virq line.
+	 *
+	 * Check by adding the bit offset to the index of the highest set bit
+	 * in the requested bitmask. If we need to raise a bit that is greater
+	 * than bit 31, we have spilled into the next word and need to raise
+	 * that too.
+	 */
+	first_set_bit = find_first_bit(&bits, BITS_PER_LONG);
+	spilled_bits = first_set_bit + bit_offset - (BITS_PER_LONG - 1);
+	if (spilled_bits > 0) {
+		/*
+		 * Calculate the new bitmask for the spilled bits. We do this
+		 * by shifting the requested bits to the right. The number of
+		 * shifts is determined by where the first spilled bit is.
+		 */
+		int first_spilled_bit = first_set_bit - spilled_bits + 1;
+
+		bitmask = bits >> first_spilled_bit;
+
+		vs_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+				"Sending notification %ld to service id %d\n", bitmask,
+				service->id);
+
+		_okl4_sys_vinterrupt_raise(transport->notify_cap[word + 1], bitmask);
+	}
+
+	return 0;
+}
+
+static void
+transport_handle_free_bufs_message(struct vs_transport_axon *transport,
+		struct vs_mbuf_axon *mbuf)
+{
+	struct vs_mv_service_info *service_info;
+	vs_service_id_t service_id;
+	u16 *p = mbuf->base.data;
+	int i, count, freed_acks, new_balance;
+
+	count = *(p++);
+	vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+			transport->axon_dev,
+			"Free bufs message received for %d services\n", count);
+	for (i = 0; i < count; i++) {
+		int old_quota __maybe_unused;
+
+		service_id = *(p++);
+		freed_acks = *(p++);
+
+		vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+				transport->axon_dev, "  [%.2d] %.4d\n",
+				service_id, freed_acks);
+
+		service_info = transport_get_service_id_info(transport,
+				service_id);
+		if (!service_info) {
+			vs_dev_debug(VS_DEBUG_TRANSPORT,
+					transport->session_dev,
+					transport->axon_dev,
+					"Got %d free_acks for unknown service %d\n",
+					freed_acks, service_id);
+			continue;
+		}
+
+		old_quota = atomic_read(&service_info->send_inflight);
+		freed_acks = reduce_send_quota(transport, service_info,
+				freed_acks, service_info->ready);
+		vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+				transport->axon_dev,
+				"  [%.2d] Freed %.2d buffers (%d -> %d, quota = %d)\n",
+				service_id, freed_acks, old_quota,
+				atomic_read(&service_info->send_inflight),
+				service_info->service->send_quota);
+
+		transport_put_service_info(service_info);
+	}
+
+	__transport_free_mbuf(transport, mbuf, true);
+
+	new_balance = atomic_dec_return(&transport->free_bufs_balance);
+	if (new_balance < -MAX_BALANCE) {
+		dev_err(transport->axon_dev,
+				"Balance counter fell below -MAX_BALANCE (%d < %d)\n",
+				atomic_read(&transport->free_bufs_balance),
+				-MAX_BALANCE);
+		transport_fatal_error(transport, "balance counter underrun");
+		return;
+	}
+
+	/* Check if we need to send a freed buffers message back */
+	if (new_balance < 0 || transport->free_bufs_pending)
+		schedule_delayed_work(&transport->free_bufs_work, 0);
+}
+
+static int transport_rx_queue_buffer(struct vs_transport_axon *transport,
+		void *ptr, dma_addr_t laddr)
+{
+	struct okl4_axon_queue_entry *desc;
+	okl4_axon_data_info_t info;
+
+	/* Select the buffer desc to reallocate */
+	desc = &transport->rx_descs[transport->rx_uptr_allocated];
+	info = ACCESS_ONCE(desc->info);
+
+	/* If there is no space in the rx queue, fail */
+	if (okl4_axon_data_info_getusr(&info))
+		return -ENOSPC;
+
+	/* Don't update the desc before observing that the usr bit is clear */
+	smp_mb();
+
+	/* Update the buffer pointer in the desc and mark it valid. */
+	transport->rx_ptrs[transport->rx_uptr_allocated] = ptr;
+	okl4_axon_data_info_setladdr(&info, (okl4_laddr_t)laddr);
+	okl4_axon_data_info_setpending(&info, true);
+	okl4_axon_data_info_setusr(&info, true);
+	mb();
+	ACCESS_ONCE(desc->info) = info;
+
+	/* Proceed to the next buffer */
+	INC_MOD(transport->rx_uptr_allocated,
+			transport->rx->queues[0].entries);
+
+	/* Return true if the next desc has no buffer yet */
+	desc = &transport->rx_descs[transport->rx_uptr_allocated];
+	return !okl4_axon_data_info_getusr(&desc->info);
+}
+
+/* TODO: multiple queue support / small message prioritisation */
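+/*
+ * Process one received message, if any. Returns 1 if a message was consumed
+ * (delivered, dropped or handled internally), 0 if the next buffer has not
+ * been filled yet, -ENOBUFS if the RX queue is empty, or a negative error
+ * code on failure.
+ */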
+static int transport_process_msg(struct vs_transport_axon *transport)
+{
+	struct vs_mv_service_info *service_info;
+	struct vs_mbuf_axon *mbuf;
+	vs_service_id_t service_id;
+	unsigned freed_acks;
+	u32 uptr;
+	struct okl4_axon_queue_entry *desc;
+	void **ptr;
+	okl4_axon_data_info_t info;
+
+	/* Select the descriptor to receive from */
+	uptr = ACCESS_ONCE(transport->rx->queues[0].uptr);
+	desc = &transport->rx_descs[uptr];
+	ptr = &transport->rx_ptrs[uptr];
+	info = ACCESS_ONCE(desc->info);
+
+	/* Have we emptied the whole queue? */
+	if (!okl4_axon_data_info_getusr(&info))
+		return -ENOBUFS;
+
+	/* Has the next buffer been filled yet? */
+	if (okl4_axon_data_info_getpending(&info))
+		return 0;
+
+	/* Don't read the buffer or desc before seeing a cleared pending bit */
+	rmb();
+
+	/* Is the message too small to be valid? */
+	if (desc->data_size < sizeof(vs_service_id_t))
+		return -EBADMSG;
+
+	/* Allocate and set up the mbuf */
+	mbuf = kmem_cache_alloc(mbuf_cache, GFP_ATOMIC);
+	if (!mbuf)
+		return -ENOMEM;
+
+	mbuf->owner = transport;
+	mbuf->laddr = okl4_axon_data_info_getladdr(&info);
+	mbuf->pool = NULL;
+	mbuf->base.is_recv = true;
+	mbuf->base.data = *ptr + sizeof(vs_service_id_t);
+	mbuf->base.size = desc->data_size - sizeof(vs_service_id_t);
+
+	INC_MOD(uptr, transport->rx->queues[0].entries);
+	ACCESS_ONCE(transport->rx->queues[0].uptr) = uptr;
+
+	/* Finish reading desc before clearing usr bit */
+	smp_mb();
+
+	/* Re-check the pending bit, in case we've just been reset */
+	info = ACCESS_ONCE(desc->info);
+	if (unlikely(okl4_axon_data_info_getpending(&info))) {
+		kmem_cache_free(mbuf_cache, mbuf);
+		return 0;
+	}
+
+	/* Clear usr bit; after this point the buffer is owned by the mbuf */
+	okl4_axon_data_info_setusr(&info, false);
+	ACCESS_ONCE(desc->info) = info;
+
+	/* Determine who to deliver the mbuf to */
+	service_id = transport_get_mbuf_service_id(transport,
+			mbuf_real_base(mbuf), &freed_acks);
+
+	if (service_id == MSG_SEND_FREE_BUFS) {
+		transport_handle_free_bufs_message(transport, mbuf);
+		return 1;
+	}
+
+	service_info = transport_get_service_id_info(transport, service_id);
+	if (!service_info) {
+		vs_dev_debug(VS_DEBUG_TRANSPORT,
+				transport->session_dev, transport->axon_dev,
+				"discarding message for missing service %d\n",
+				service_id);
+		__transport_free_mbuf(transport, mbuf, true);
+		return -EIDRM;
+	}
+
+	/*
+	 * If the remote end has freed some buffers that we sent it, then we
+	 * can decrement our send quota count by that amount.
+	 */
+	freed_acks = reduce_send_quota(transport, service_info,
+			freed_acks, service_info->ready);
+
+	/* If the service has been reset, drop the message. */
+	if (!service_info->ready) {
+		vs_dev_debug(VS_DEBUG_TRANSPORT,
+				transport->session_dev, transport->axon_dev,
+				"discarding message for reset service %d\n",
+				service_id);
+
+		__transport_free_mbuf(transport, mbuf, true);
+		transport_put_service_info(service_info);
+
+		return 1;
+	}
+
+	/*
+	 * Increment our recv quota since we are now holding a buffer. We
+	 * will decrement it when the buffer is freed in transport_free_mbuf.
+	 */
+	if (!atomic_add_unless(&service_info->recv_inflight, 1,
+				service_info->service->recv_quota)) {
+		/*
+		 * Going over the recv_quota indicates that something bad
+		 * has happened because either the other end has exceeded
+		 * its send quota or the two ends disagree about what the
+		 * quota is.
+		 *
+		 * We free the buffer and reset the transport.
+		 */
+		dev_err(transport->axon_dev,
+				"Service %d is at max receive quota %d - resetting\n",
+				service_info->service->id,
+				service_info->service->recv_quota);
+
+		transport_fatal_error(transport, "rx quota exceeded");
+
+		__transport_free_mbuf(transport, mbuf, true);
+		transport_put_service_info(service_info);
+
+		return 0;
+	}
+
+	WARN_ON(atomic_read(&service_info->recv_inflight) >
+			service_info->service->recv_quota);
+
+	vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+			transport->axon_dev,
+			"receive %zu bytes from service 0x%.2x (%d): sq=%d/%d, rq=%d/%d, freed_acks=%d, freed=%d/%d bc=%d\n",
+			mbuf->base.size, service_info->service->id, service_id,
+			atomic_read(&service_info->send_inflight),
+			service_info->service->send_quota,
+			atomic_read(&service_info->recv_inflight),
+			service_info->service->recv_quota, freed_acks,
+			atomic_read(&service_info->recv_freed),
+			service_info->recv_freed_watermark,
+			atomic_read(&transport->free_bufs_balance));
+	vs_debug_dump_mbuf(transport->session_dev, &mbuf->base);
+
+	if (vs_session_handle_message(transport->session_dev, &mbuf->base,
+			service_id) < 0)
+		transport_free_mbuf(&transport->transport,
+				service_info->service, &mbuf->base);
+
+	transport_put_service_info(service_info);
+
+	return 1;
+}
+
+static void transport_flush_tx_queues(struct vs_transport_axon *transport)
+{
+	okl4_error_t err;
+	int i;
+
+	lockdep_assert_held(&transport->readiness_lock);
+
+	/* Release any queued mbufs */
+	free_tx_mbufs(transport);
+
+	/*
+	 * Re-attach the TX Axon's segment, which implicitly invalidates
+	 * the queues and stops any outgoing message transfers. The queues
+	 * will be reconfigured when the transport becomes ready again.
+	 */
+	err = _okl4_sys_axon_set_send_segment(transport->tx_cap,
+			transport->segment, transport->segment_base);
+	if (err != OKL4_OK) {
+		dev_err(transport->axon_dev, "TX reattach failed: %d\n",
+				(int)err);
+	}
+
+	/*
+	 * The TX Axon has stopped, so we can safely clear the pending
+	 * bit and free the buffer for any outgoing messages, and reset uptr
+	 * and kptr to 0.
+	 */
+	for (i = 0; i < transport->tx->queues[0].entries; i++) {
+		if (!transport->tx_pools[i])
+			continue;
+
+		okl4_axon_data_info_setpending(
+				&transport->tx_descs[i].info, false);
+		__transport_tx_pool_free(transport->tx_pools[i],
+				okl4_axon_data_info_getladdr(
+					&transport->tx_descs[i].info));
+		transport->tx_pools[i] = NULL;
+	}
+	transport->tx->queues[0].uptr = 0;
+	transport->tx->queues[0].kptr = 0;
+	transport->tx_uptr_freed = 0;
+}
+
+static void transport_flush_rx_queues(struct vs_transport_axon *transport)
+{
+	okl4_error_t err;
+	int i;
+
+	lockdep_assert_held(&transport->readiness_lock);
+
+	/*
+	 * Re-attach the RX Axon's segment, which implicitly invalidates
+	 * the queues and stops any incoming message transfers, though the
+	 * sending end should already have cancelled those. The queues
+	 * will be reconfigured when the transport becomes ready again.
+	 */
+	err = _okl4_sys_axon_set_recv_segment(transport->rx_cap,
+			transport->segment, transport->segment_base);
+	if (err != OKL4_OK) {
+		dev_err(transport->axon_dev, "RX reattach failed: %d\n",
+				(int)err);
+	}
+
+	/*
+	 * The RX Axon has stopped, so we can reset the pending bit on all
+	 * allocated message buffers to prepare them for reuse when the reset
+	 * completes.
+	 */
+	for (i = 0; i < transport->rx->queues[0].entries; i++) {
+		if (okl4_axon_data_info_getusr(&transport->rx_descs[i].info))
+			okl4_axon_data_info_setpending(
+					&transport->rx_descs[i].info, true);
+	}
+
+	/*
+	 * Reset kptr to the current uptr.
+	 *
+	 * We use a barrier here to ensure the pending bits are reset before
+	 * reading uptr, matching the barrier in transport_process_msg between
+	 * the uptr update and the second check of the pending bit. This means
+	 * that races with transport_process_msg() will end in one of two
+	 * ways:
+	 *
+	 * 1. transport_process_msg() updates uptr before this barrier, so the
+	 *    RX buffer is passed up to the session layer to be rejected there
+	 *    and recycled; or
+	 *
+	 * 2. the reset pending bit is seen by the second check in
+	 *    transport_process_msg(), which knows that it is being reset and
+	 *    can drop the message before it claims the buffer.
+	 */
+	smp_mb();
+	transport->rx->queues[0].kptr =
+		ACCESS_ONCE(transport->rx->queues[0].uptr);
+
+	/*
+	 * Cancel any pending freed bufs work. We can't flush it here, but
+	 * that is OK: we will do so before we become ready.
+	 */
+	cancel_delayed_work(&transport->free_bufs_work);
+}
+
+static bool transport_axon_reset(struct vs_transport_axon *transport)
+{
+	okl4_error_t err;
+	unsigned long flags;
+	bool reset_complete = false;
+
+	spin_lock_irqsave(&transport->readiness_lock, flags);
+
+	/*
+	 * Reset the transport, dumping any messages in transit, and tell the
+	 * remote end that it should do the same.
+	 *
+	 * We only do this if the transport is not already marked as reset;
+	 * resetting again would be redundant.
+	 */
+	if ((transport->readiness != VS_TRANSPORT_RESET) &&
+			transport->readiness != VS_TRANSPORT_LOCAL_RESET &&
+			transport->readiness != VS_TRANSPORT_REMOTE_READY) {
+		/*
+		 * Flush the Axons' TX queues. We can't flush the RX queues
+		 * until after the remote end has acknowledged the reset.
+		 */
+		transport_flush_tx_queues(transport);
+
+		/*
+		 * Raise a reset request VIRQ, and discard any incoming reset
+		 * or ready notifications as they are now stale. Note that we
+		 * must do this in a single syscall.
+		 */
+		err = _okl4_sys_vinterrupt_clear_and_raise(
+				transport->reset_okl4_irq,
+				transport->reset_cap, 0UL,
+				VS_TRANSPORT_VIRQ_RESET_REQ).error;
+		if (err != OKL4_OK) {
+			dev_err(transport->axon_dev, "Reset raise failed: %d\n",
+					(int)err);
+		}
+
+		/* Local reset is complete */
+		if (transport->readiness != VS_TRANSPORT_SHUTDOWN)
+			transport->readiness = VS_TRANSPORT_LOCAL_RESET;
+	} else {
+		/* Already in reset */
+		reset_complete = true;
+	}
+
+	spin_unlock_irqrestore(&transport->readiness_lock, flags);
+
+	return reset_complete;
+}
+
+static void transport_reset(struct vs_transport *_transport)
+{
+	struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+
+	vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+			transport->axon_dev, "reset\n");
+
+	if (transport_axon_reset(transport)) {
+		vs_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+				"reset while already reset (no-op)\n");
+
+		vs_session_handle_reset(transport->session_dev);
+	}
+}
+
+static void transport_ready(struct vs_transport *_transport)
+{
+	struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+	okl4_error_t err;
+
+	vs_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+			"%s: becoming ready\n", __func__);
+
+	/*
+	 * Make sure any previously scheduled freed bufs work is cancelled.
+	 * It should not be possible for this to be rescheduled later, as long
+	 * as the transport is in reset.
+	 */
+	cancel_delayed_work_sync(&transport->free_bufs_work);
+	spin_lock_irq(&transport->readiness_lock);
+
+	atomic_set(&transport->free_bufs_balance, 0);
+	transport->free_bufs_pending = false;
+
+	switch (transport->readiness) {
+	case VS_TRANSPORT_RESET:
+		transport->readiness = VS_TRANSPORT_LOCAL_READY;
+		break;
+	case VS_TRANSPORT_REMOTE_READY:
+		vs_session_handle_activate(transport->session_dev);
+		transport->readiness = VS_TRANSPORT_ACTIVE;
+		break;
+	case VS_TRANSPORT_LOCAL_RESET:
+		/*
+		 * Session layer is confused; usually this is because the reset
+		 * at init time, which it did not explicitly request, has not
+		 * completed yet. We just ignore it and wait for the reset. We
+		 * could avoid this by not starting the session until the
+		 * startup reset completes.
+		 */
+		spin_unlock_irq(&transport->readiness_lock);
+		return;
+	case VS_TRANSPORT_SHUTDOWN:
+		/* Do nothing. */
+		spin_unlock_irq(&transport->readiness_lock);
+		return;
+	default:
+		/* Session layer is broken */
+		WARN(1, "transport_ready() called in the wrong state: %d",
+				transport->readiness);
+		goto fail;
+	}
+
+	/* Raise a ready notification VIRQ. */
+	err = _okl4_sys_vinterrupt_raise(transport->reset_cap,
+			VS_TRANSPORT_VIRQ_READY);
+	if (err != OKL4_OK) {
+		dev_err(transport->axon_dev, "Ready raise failed: %d\n",
+				(int)err);
+		goto fail;
+	}
+
+	/*
+	 * Set up the Axons' queue pointers.
+	 */
+	err = _okl4_sys_axon_set_send_area(transport->tx_cap,
+			transport->tx_phys, transport->tx_size);
+	if (err != OKL4_OK) {
+		dev_err(transport->axon_dev, "TX set area failed: %d\n",
+				(int)err);
+		goto fail;
+	}
+
+	err = _okl4_sys_axon_set_send_queue(transport->tx_cap,
+			transport->tx_phys);
+	if (err != OKL4_OK) {
+		dev_err(transport->axon_dev, "TX set queue failed: %d\n",
+				(int)err);
+		goto fail;
+	}
+
+	err = _okl4_sys_axon_set_recv_area(transport->rx_cap,
+			transport->rx_phys, transport->rx_size);
+	if (err != OKL4_OK) {
+		dev_err(transport->axon_dev, "RX set area failed: %d\n",
+				(int)err);
+		goto fail;
+	}
+
+	err = _okl4_sys_axon_set_recv_queue(transport->rx_cap,
+			transport->rx_phys);
+	if (err != OKL4_OK) {
+		dev_err(transport->axon_dev, "RX set queue failed: %d\n",
+				(int)err);
+		goto fail;
+	}
+
+	spin_unlock_irq(&transport->readiness_lock);
+	return;
+
+fail:
+	spin_unlock_irq(&transport->readiness_lock);
+
+	transport_axon_reset(transport);
+}
+
+static int transport_service_add(struct vs_transport *_transport,
+		struct vs_service_device *service)
+{
+	struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+	struct vs_mv_service_info *service_info;
+
+	/*
+	 * We can't print out the core service add because the session
+	 * isn't fully registered at that time.
+	 */
+	if (service->id != 0)
+		vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+				transport->axon_dev,
+				"Add service - id = %d\n", service->id);
+
+	service_info = kzalloc(sizeof(*service_info), GFP_KERNEL);
+	if (!service_info)
+		return -ENOMEM;
+
+	kref_init(&service_info->kref);
+
+	/* Matching vs_put_service() is in transport_info_free */
+	service_info->service = vs_get_service(service);
+
+	/* Make the service_info visible */
+	rcu_assign_pointer(service->transport_priv, service_info);
+
+	__set_bit(service->id, transport->service_bitmap);
+
+	return 0;
+}
+
+static void transport_service_remove(struct vs_transport *_transport,
+		struct vs_service_device *service)
+{
+	struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+	struct vs_mv_service_info *service_info;
+
+	vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+			transport->axon_dev, "Remove service - id = %d\n",
+			service->id);
+
+	__clear_bit(service->id, transport->service_bitmap);
+
+	service_info = service->transport_priv;
+	rcu_assign_pointer(service->transport_priv, NULL);
+
+	if (service_info->ready) {
+		dev_err(transport->axon_dev,
+				"Removing service %d while ready\n",
+				service->id);
+		transport_fatal_error(transport, "removing ready service");
+	}
+
+	transport_put_service_info(service_info);
+}
+
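+/*
+ * Allocate a TX buffer pool: send_quota message-sized buffers in a single
+ * coherent DMA allocation, with a per-buffer allocation bitmap stored after
+ * the pool structure.
+ */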
+static struct vs_axon_tx_pool *
+transport_axon_init_tx_pool(struct vs_transport_axon *transport,
+		size_t msg_size, unsigned send_quota)
+{
+	struct vs_axon_tx_pool *pool;
+
+	pool = devm_kzalloc(transport->axon_dev, sizeof(*pool) +
+			(sizeof(unsigned long) * BITS_TO_LONGS(send_quota)),
+			GFP_KERNEL);
+	if (!pool)
+		return ERR_PTR(-ENOMEM);
+
+	pool->transport = transport;
+	pool->alloc_order = ilog2(msg_size + sizeof(vs_service_id_t));
+	pool->count = send_quota;
+
+	pool->base_vaddr = dmam_alloc_coherent(transport->axon_dev,
+			send_quota << pool->alloc_order, &pool->base_laddr,
+			GFP_KERNEL);
+	if (!pool->base_vaddr) {
+		dev_err(transport->axon_dev, "Couldn't allocate %lu times %zu bytes for TX\n",
+				(unsigned long)pool->count, (size_t)1 << pool->alloc_order);
+		devm_kfree(transport->axon_dev, pool);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	kref_init(&pool->kref);
+	return pool;
+}
+
+static int transport_service_start(struct vs_transport *_transport,
+		struct vs_service_device *service)
+{
+	struct vs_mv_service_info *service_info;
+	struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+	struct vs_notify_info *info;
+	int i, ret;
+	bool enable_rx;
+
+	vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+			transport->axon_dev, "Start service - id = %d\n",
+			service->id);
+
+	service_info = service->transport_priv;
+	__transport_get_service_info(service_info);
+
+	/* We shouldn't have any mbufs left from before the last reset. */
+	if (WARN_ON(atomic_read(&service_info->outstanding_frees))) {
+		transport_put_service_info(service_info);
+		return -EBUSY;
+	}
+
+	/*
+	 * The watermark is set to half of the receive quota, rounded up.
+	 * This is fairly arbitrary; rounding up ensures that it is never 0
+	 * for services with a quota of 1 (which would otherwise trigger
+	 * endless free_bufs messages).
+	 */
+	service_info->recv_freed_watermark = (service->recv_quota + 1) / 2;
+
+	if (WARN_ON(service->notify_recv_bits + service->notify_recv_offset >
+				transport->notify_rx_nirqs * BITS_PER_LONG)) {
+		transport_put_service_info(service_info);
+		return -EINVAL;
+	}
+
+	if (WARN_ON(service->notify_send_bits + service->notify_send_offset >
+				transport->notify_tx_nirqs * BITS_PER_LONG)) {
+		transport_put_service_info(service_info);
+		return -EINVAL;
+	}
+
+	/* This is called twice for the core client only. */
+	WARN_ON(service->id != 0 && service_info->ready);
+
+	if (!service_info->ready) {
+		WARN_ON(atomic_read(&service_info->send_alloc));
+		WARN_ON(atomic_read(&service_info->recv_freed));
+		WARN_ON(atomic_read(&service_info->recv_inflight));
+	}
+
+	/* Create the TX buffer pool. */
+	WARN_ON(service->send_quota && service_info->tx_pool);
+	if (service->send_quota) {
+		service_info->tx_pool = transport_axon_init_tx_pool(transport,
+				transport->msg_size, service->send_quota);
+		if (IS_ERR(service_info->tx_pool)) {
+			ret = PTR_ERR(service_info->tx_pool);
+			service_info->tx_pool = NULL;
+			transport_put_service_info(service_info);
+			return ret;
+		}
+	}
+
+	/*
+	 * Preallocate RX buffers, if necessary: first take any spare
+	 * buffers already held by the transport, then allocate the
+	 * remainder from the DMA pool.
+	 */
+	spin_lock_irq(&transport->rx_alloc_lock);
+	i = min(transport->rx_alloc_extra,
+			service->recv_quota - service_info->rx_allocated);
+	transport->rx_alloc_extra -= i;
+	service_info->rx_allocated += i;
+	spin_unlock_irq(&transport->rx_alloc_lock);
+
+	for (; service_info->rx_allocated < service->recv_quota;
+			service_info->rx_allocated++) {
+		dma_addr_t laddr;
+		struct vs_axon_rx_freelist_entry *buf =
+			dma_pool_alloc(transport->rx_pool, GFP_KERNEL, &laddr);
+		if (WARN_ON(!buf))
+			break;
+		buf->laddr = laddr;
+
+		spin_lock_irq(&transport->rx_alloc_lock);
+		list_add(&buf->list, &transport->rx_freelist);
+		spin_unlock_irq(&transport->rx_alloc_lock);
+	}
+
+	for (i = 0; i < service->notify_recv_bits; i++) {
+		unsigned bit = i + service->notify_recv_offset;
+		info = &transport->transport.notify_info[bit];
+
+		info->service_id = service->id;
+		info->offset = service->notify_recv_offset;
+	}
+
+	atomic_set(&service_info->send_inflight, 0);
+
+	/*
+	 * If this is the core service and it wasn't ready before, we need to
+	 * enable RX for the whole transport.
+	 */
+	enable_rx = service->id == 0 && !service_info->ready;
+
+	service_info->ready = true;
+
+	/* We're now ready to receive. */
+	if (enable_rx)
+		tasklet_enable(&transport->rx_tasklet);
+
+	transport_put_service_info(service_info);
+
+	return 0;
+}
+
+static int transport_service_reset(struct vs_transport *_transport,
+		struct vs_service_device *service)
+{
+	struct vs_mv_service_info *service_info;
+	struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+	struct vs_mbuf_axon *child, *tmp;
+	int ret = 0, service_id, send_remaining, recv_remaining;
+
+	vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+			transport->axon_dev, "Reset service - id = %d\n",
+			service->id);
+
+	service_info = service->transport_priv;
+	__transport_get_service_info(service_info);
+
+	/*
+	 * Clear the ready bit with the tasklet disabled. After this point,
+	 * incoming messages will be discarded by transport_process_msg()
+	 * without incrementing recv_inflight, so we won't spuriously see
+	 * nonzero recv_inflight values for messages that would be discarded
+	 * in the session layer.
+	 */
+	tasklet_disable(&transport->rx_tasklet);
+	service_info->ready = false;
+	if (service->id)
+		tasklet_enable(&transport->rx_tasklet);
+
+	/*
+	 * Cancel and free all pending outgoing messages for the service being
+	 * reset; i.e. those that have been sent by the service but are not
+	 * yet in the axon queue.
+	 *
+	 * Note that this does not clean out the axon queue; messages there
+	 * are already visible to OKL4 and may be transferred at any time,
+	 * so we treat those as already sent.
+	 */
+	spin_lock_irq(&transport->readiness_lock);
+	list_for_each_entry_safe(child, tmp, &transport->tx_queue, base.queue) {
+		service_id = transport_get_mbuf_service_id(transport,
+				mbuf_real_base(child), NULL);
+		if (service_id == service->id) {
+			list_del(&child->base.queue);
+			__transport_tx_pool_free(child->pool, child->laddr);
+		}
+	}
+	spin_unlock_irq(&transport->readiness_lock);
+
+	/*
+	 * If any buffers remain allocated, we mark them as outstanding frees.
+	 * The transport will remain disabled until this count goes to zero.
+	 */
+	send_remaining = atomic_read(&service_info->send_alloc);
+	recv_remaining = atomic_read(&service_info->recv_inflight);
+	ret = atomic_add_return(send_remaining + recv_remaining,
+			&service_info->outstanding_frees);
+	dev_dbg(transport->axon_dev, "reset service %d with %d outstanding (send %d, recv %d)\n",
+			service->id, ret, send_remaining, recv_remaining);
+
+	/*
+	 * Reduce the send alloc count to 0, accounting for races with frees,
+	 * which might have reduced either the alloc count or the outstanding
+	 * count.
+	 */
+	while (send_remaining > 0) {
+		unsigned new_send_remaining = atomic_cmpxchg(
+				&service_info->send_alloc, send_remaining, 0);
+		if (send_remaining == new_send_remaining) {
+			smp_mb();
+			break;
+		}
+		WARN_ON(send_remaining < new_send_remaining);
+		ret = atomic_sub_return(send_remaining - new_send_remaining,
+				&service_info->outstanding_frees);
+		send_remaining = new_send_remaining;
+		dev_dbg(transport->axon_dev, "failed to zero send quota, now %d outstanding (%d send)\n",
+				ret, send_remaining);
+	}
+
+	/* Repeat the above for the recv inflight count. */
+	while (recv_remaining > 0) {
+		unsigned new_recv_remaining = atomic_cmpxchg(
+				&service_info->recv_inflight, recv_remaining,
+				0);
+		if (recv_remaining == new_recv_remaining) {
+			smp_mb();
+			break;
+		}
+		WARN_ON(recv_remaining < new_recv_remaining);
+		ret = atomic_sub_return(recv_remaining - new_recv_remaining,
+				&service_info->outstanding_frees);
+		recv_remaining = new_recv_remaining;
+		dev_dbg(transport->axon_dev, "failed to zero recv quota, now %d outstanding (%d recv)\n",
+				ret, recv_remaining);
+	}
+
+	/* The outstanding frees count should never go negative */
+	WARN_ON(ret < 0);
+
+	/* Discard any outstanding freed buffer notifications. */
+	atomic_set(&service_info->recv_freed, 0);
+
+	/*
+	 * Wait for any previously queued free_bufs work to finish. This
+	 * guarantees that any freed buffer notifications that are already in
+	 * progress will be sent to the remote end before we return, and thus
+	 * before the reset is signalled.
+	 */
+	flush_delayed_work(&transport->free_bufs_work);
+
+	if (!ret)
+		transport_free_mbuf_pools(transport, service, service_info);
+
+	transport_put_service_info(service_info);
+
+	return ret;
+}
+
+static ssize_t transport_service_send_avail(struct vs_transport *_transport,
+		struct vs_service_device *service)
+{
+	struct vs_mv_service_info *service_info;
+	ssize_t count = 0;
+
+	service_info = service->transport_priv;
+	if (!service_info)
+		return -EINVAL;
+
+	__transport_get_service_info(service_info);
+
+	count = service->send_quota -
+		atomic_read(&service_info->send_inflight);
+
+	transport_put_service_info(service_info);
+
+	return count < 0 ? 0 : count;
+}
+
+static void transport_get_notify_bits(struct vs_transport *_transport,
+		unsigned *send_notify_bits, unsigned *recv_notify_bits)
+{
+	struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+
+	*send_notify_bits = transport->notify_tx_nirqs * BITS_PER_LONG;
+	*recv_notify_bits = transport->notify_rx_nirqs * BITS_PER_LONG;
+}
+
+static void transport_get_quota_limits(struct vs_transport *_transport,
+		unsigned *send_quota, unsigned *recv_quota)
+{
+	/*
+	 * This driver does not need to enforce a quota limit, because message
+	 * buffers are allocated from the kernel heap rather than a fixed
+	 * buffer area. The queue length only determines the maximum size of
+	 * a message batch, and the number of preallocated RX buffers.
+	 *
+	 * Note that per-service quotas are still enforced; there is simply no
+	 * hard limit on the total of all service quotas.
+	 */
+
+	*send_quota = UINT_MAX;
+	*recv_quota = UINT_MAX;
+}
+
+static const struct vs_transport_vtable tvt = {
+	.alloc_mbuf		= transport_alloc_mbuf,
+	.free_mbuf		= transport_free_mbuf,
+	.mbuf_size		= transport_mbuf_size,
+	.max_mbuf_size		= transport_max_mbuf_size,
+	.send			= transport_send,
+	.flush			= transport_flush,
+	.notify			= transport_notify,
+	.reset			= transport_reset,
+	.ready			= transport_ready,
+	.service_add		= transport_service_add,
+	.service_remove		= transport_service_remove,
+	.service_start		= transport_service_start,
+	.service_reset		= transport_service_reset,
+	.service_send_avail	= transport_service_send_avail,
+	.get_notify_bits	= transport_get_notify_bits,
+	.get_quota_limits	= transport_get_quota_limits,
+};
+
+/* Incoming notification handling for client */
+static irqreturn_t transport_axon_notify_virq(int irq, void *priv)
+{
+	struct vs_transport_axon *transport = (struct vs_transport_axon *)priv;
+	struct vs_notify_info *n_info;
+	unsigned long offset, bit = 0, notification;
+	int word;
+	okl4_virq_flags_t payload = okl4_get_virq_payload(irq);
+
+	for (word = 0; word < transport->notify_rx_nirqs; word++)
+		if (irq == transport->notify_irq[word])
+			break;
+
+	if (word == transport->notify_rx_nirqs) {
+		dev_err(transport->axon_dev, "Bad IRQ %d\n", irq);
+		return IRQ_NONE;
+	}
+
+	vs_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+			"Got notification irq\n");
+
+#if defined(__BIG_ENDIAN)
+	/*
+	 * We rely on being able to use the Linux bitmap operations directly
+	 * on the VIRQ payload.
+	 */
+	BUILD_BUG_ON((sizeof(payload) % sizeof(unsigned long)) != 0);
+#endif
+
+	for_each_set_bit(bit, (unsigned long *)&payload, sizeof(payload) * 8) {
+		offset = bit + word * BITS_PER_LONG;
+
+		/*
+		 * We need to know which service id is associated with each
+		 * notification bit. The transport learns the bit-to-service-id
+		 * mapping during the initial handshake protocol.
+		 */
+		n_info = &transport->transport.notify_info[offset];
+
+		notification = 1UL << (offset - n_info->offset);
+		vs_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+				"Got notification bit %lu for service %d\n",
+				notification, n_info->service_id);
+
+		/* FIXME: Jira ticket SDK-2145 - shivanik. */
+		vs_session_handle_notify(transport->session_dev, notification,
+				n_info->service_id);
+	}
+
+	return IRQ_HANDLED;
+}
+
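+/*
+ * Reset/ready handshake handler. A reset request from the remote end is
+ * acknowledged (cancelling any earlier ready) and flushes both queue
+ * directions; a reset ack flushes the RX queues. Once reset, each end
+ * raises READY, and the transport activates when a READY arrives while we
+ * are locally ready.
+ */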
+static irqreturn_t transport_axon_reset_irq(int irq, void *priv)
+{
+	struct vs_transport_axon *transport = (struct vs_transport_axon *)priv;
+	bool do_reset = false;
+
+	u32 payload = okl4_get_virq_payload(irq);
+
+	spin_lock(&transport->readiness_lock);
+
+	if (payload & VS_TRANSPORT_VIRQ_RESET_REQ) {
+		okl4_error_t err;
+
+		transport->readiness = VS_TRANSPORT_RESET;
+
+		/* Flush the queues in both directions */
+		transport_flush_tx_queues(transport);
+		transport_flush_rx_queues(transport);
+
+		/*
+		 * When sending an ack, it is important to cancel any earlier
+		 * ready notification, so the recipient can safely assume that
+		 * the ack precedes any ready it sees.
+		 */
+		err = _okl4_sys_vinterrupt_modify(transport->reset_cap,
+				~VS_TRANSPORT_VIRQ_READY,
+				VS_TRANSPORT_VIRQ_RESET_ACK);
+		if (err != OKL4_OK) {
+			dev_warn(transport->axon_dev,
+					"Error sending reset ack: %d\n", (int)err);
+		}
+
+		/*
+		 * Discard any pending ready event; it must have happened
+		 * before the reset request was raised, because we had not
+		 * yet sent the reset ack.
+		 */
+		payload = 0;
+		do_reset = true;
+	} else if (payload & VS_TRANSPORT_VIRQ_RESET_ACK) {
+		transport->readiness = VS_TRANSPORT_RESET;
+
+		/*
+		 * Flush the RX queues, as we know at this point that the
+		 * other end has flushed its TX queues.
+		 */
+		transport_flush_rx_queues(transport);
+
+		/*
+		 * Preserve any pending ready event; it must have been
+		 * generated after the ack (see above).
+		 */
+		payload &= VS_TRANSPORT_VIRQ_READY;
+		do_reset = true;
+	}
+
+	if (do_reset) {
+		/*
+		 * Reset the session. Note that duplicate calls to this are
+		 * expected if there are duplicate resets; they don't
+		 * necessarily match activate calls.
+		 */
+		vs_session_handle_reset(transport->session_dev);
+	}
+
+	if (payload & VS_TRANSPORT_VIRQ_READY) {
+		if (transport->readiness == VS_TRANSPORT_RESET) {
+			transport->readiness = VS_TRANSPORT_REMOTE_READY;
+		} else if (transport->readiness == VS_TRANSPORT_LOCAL_READY) {
+			vs_session_handle_activate(transport->session_dev);
+			transport->readiness = VS_TRANSPORT_ACTIVE;
+		} else {
+			/* Ready lost a race with reset; ignore it. */
+		}
+	}
+
+	spin_unlock(&transport->readiness_lock);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Axon VIRQ handling.
+ */
+static irqreturn_t transport_axon_rx_irq(int irq, void *priv)
+{
+	struct vs_transport_axon *transport = (struct vs_transport_axon *)priv;
+
+	okl4_axon_virq_flags_t flags = okl4_get_virq_payload(irq);
+
+	if (okl4_axon_virq_flags_getfault(&flags)) {
+		dev_err_ratelimited(transport->axon_dev,
+				"fault on RX axon buffer or queue; resetting\n");
+		transport_axon_reset(transport);
+	} else if (okl4_axon_virq_flags_getready(&flags)) {
+		tasklet_schedule(&transport->rx_tasklet);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t transport_axon_tx_irq(int irq, void *priv)
+{
+	struct vs_transport_axon *transport = (struct vs_transport_axon *)priv;
+
+	okl4_axon_virq_flags_t flags = okl4_get_virq_payload(irq);
+
+	if (okl4_axon_virq_flags_getfault(&flags)) {
+		dev_err_ratelimited(transport->axon_dev,
+				"fault on TX axon buffer or queue; resetting\n");
+		transport_axon_reset(transport);
+	} else if (okl4_axon_virq_flags_getready(&flags)) {
+		spin_lock(&transport->readiness_lock);
+		if (!list_empty(&transport->tx_queue))
+			tasklet_schedule(&transport->tx_tasklet);
+		spin_unlock(&transport->readiness_lock);
+	}
+
+	return IRQ_HANDLED;
+}
+
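+/*
+ * RX tasklet: refill the Axon RX queue from the freelist, trigger a receive,
+ * then drain and dispatch any completed messages. Depending on the result we
+ * reschedule, back off until memory or buffers are available, or wait for
+ * the next RX interrupt.
+ */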
+static void transport_rx_tasklet(unsigned long data)
+{
+	struct vs_transport_axon *transport = (struct vs_transport_axon *)data;
+	int status;
+	struct _okl4_sys_axon_process_recv_return recv_result;
+
+	/* Refill the RX queue */
+	spin_lock_irq(&transport->rx_alloc_lock);
+	while (!list_empty(&transport->rx_freelist)) {
+		struct vs_axon_rx_freelist_entry *buf;
+		buf = list_first_entry(&transport->rx_freelist,
+				struct vs_axon_rx_freelist_entry, list);
+		list_del(&buf->list);
+		status = transport_rx_queue_buffer(transport, buf, buf->laddr);
+		if (status < 0)
+			list_add(&buf->list, &transport->rx_freelist);
+		if (status <= 0)
+			break;
+	}
+	spin_unlock_irq(&transport->rx_alloc_lock);
+
+	/* Start the transfer */
+	recv_result = _okl4_sys_axon_process_recv(transport->rx_cap,
+			MAX_TRANSFER_CHUNK);
+
+	if (recv_result.error == OKL4_OK) {
+		status = 1;
+	} else {
+		status = okl4_error_to_errno(recv_result.error);
+		vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+				transport->axon_dev, "rx syscall fail: %d",
+				status);
+	}
+
+	/* Process the received messages */
+	while (status > 0)
+		status = transport_process_msg(transport);
+
+	if (status == -ENOMEM) {
+		/* Give kswapd some time to reclaim pages */
+		mod_timer(&transport->rx_retry_timer, jiffies + HZ);
+	} else if (status == -ENOBUFS) {
+		/*
+		 * Reschedule ourselves if more RX buffers are available,
+		 * otherwise do nothing until a buffer is freed
+		 */
+		spin_lock_irq(&transport->rx_alloc_lock);
+		if (!list_empty(&transport->rx_freelist))
+			tasklet_schedule(&transport->rx_tasklet);
+		spin_unlock_irq(&transport->rx_alloc_lock);
+	} else if (!status && !recv_result.send_empty) {
+		/* There are more messages waiting; reschedule */
+		tasklet_schedule(&transport->rx_tasklet);
+	} else if (status < 0 && status != -ECONNRESET) {
+		/* Something else went wrong, other than a reset */
+		dev_err(transport->axon_dev, "Fatal RX error %d\n", status);
+		transport_fatal_error(transport, "rx failure");
+	} else {
+		/* Axon is empty; wait for an RX interrupt */
+	}
+}
+
+static void transport_tx_tasklet(unsigned long data)
+{
+	struct vs_transport_axon *transport = (struct vs_transport_axon *)data;
+	struct vs_mbuf_axon *mbuf;
+	vs_service_id_t service_id;
+	int err;
+
+	spin_lock_irq(&transport->readiness_lock);
+
+	/* Check to see if there is anything in the queue to send */
+	if (list_empty(&transport->tx_queue)) {
+		/*
+		 * Queue is empty, probably because a service reset cancelled
+		 * some pending messages. Nothing to do.
+		 */
+		spin_unlock_irq(&transport->readiness_lock);
+		return;
+	}
+
+	/*
+	 * Try to send the mbuf. If the send fails with -ENOSPC, the channel
+	 * must be full again, so wait until the next can-send event.
+	 */
+	mbuf = list_first_entry(&transport->tx_queue, struct vs_mbuf_axon,
+			base.queue);
+
+	service_id = transport_get_mbuf_service_id(transport,
+			mbuf_real_base(mbuf), NULL);
+
+	err = __transport_send(transport, mbuf, service_id,
+			VS_TRANSPORT_SEND_FLAGS_MORE);
+	if (err == -ENOSPC) {
+		/*
+		 * The channel is currently full. Leave the message in the
+		 * queue and try again when it has emptied.
+		 */
+		__transport_flush(transport);
+		goto out_unlock;
+	}
+	if (err) {
+		/*
+		 * We cannot properly handle a message send error here because
+		 * we have already returned success for the send to the service
+		 * driver when the message was queued. We don't want to leave
+		 * the message in the queue, since it could cause a DoS if the
+		 * error is persistent. Give up and force a transport reset.
+		 */
+		dev_err(transport->axon_dev,
+				"Failed to send queued mbuf: %d\n", err);
+		spin_unlock_irq(&transport->readiness_lock);
+		transport_fatal_error(transport, "queued send failure");
+		return;
+	}
+
+	/* Message sent, remove it from the queue and free the local copy */
+	list_del(&mbuf->base.queue);
+	transport_free_sent_mbuf(transport, mbuf);
+
+	/* Check to see if we have run out of messages to send */
+	if (list_empty(&transport->tx_queue)) {
+		/* Nothing left in the queue; flush and return */
+		__transport_flush(transport);
+	} else {
+		/* Reschedule to send the next message */
+		tasklet_schedule(&transport->tx_tasklet);
+	}
+
+out_unlock:
+	spin_unlock_irq(&transport->readiness_lock);
+}
+
+static void transport_rx_retry_timer(unsigned long data)
+{
+	struct vs_transport_axon *transport = (struct vs_transport_axon *)data;
+
+	/* Try to receive again; hopefully we have memory now */
+	tasklet_schedule(&transport->rx_tasklet);
+}
+
+/* Transport device management */
+
+static int alloc_notify_info(struct device *dev, struct vs_notify_info **info,
+		int *info_size, int virqs)
+{
+	/* Each VIRQ can handle BITS_PER_LONG notifications */
+	*info_size = sizeof(struct vs_notify_info) * (virqs * BITS_PER_LONG);
+	*info = devm_kzalloc(dev, *info_size, GFP_KERNEL);
+	if (!(*info))
+		return -ENOMEM;
+
+	memset(*info, 0, *info_size);
+	return 0;
+}
+
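+/*
+ * Parse the TX/RX Axon IRQs from the axon node, the reset and notification
+ * IRQs from the virtual-session node, and the outgoing reset/notify VIRQ
+ * line capabilities from the "okl,interrupt-lines" phandle list.
+ */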
+static int transport_axon_probe_virqs(struct vs_transport_axon *transport)
+{
+	struct device *device = transport->axon_dev;
+	struct device_node *axon_node = device->of_node;
+	struct device_node *vs_node = transport->of_node;
+	struct irq_data *irqd;
+	struct property *irqlines;
+	int ret, num_virq_lines;
+	struct device_node *virq_node = NULL;
+	u32 cap;
+	int i, irq_count;
+
+	if (of_irq_count(axon_node) < 2) {
+		dev_err(device, "Missing axon interrupts\n");
+		return -ENODEV;
+	}
+
+	irq_count = of_irq_count(vs_node);
+	if (irq_count < 1) {
+		dev_err(device, "Missing reset interrupt\n");
+		return -ENODEV;
+	} else if (irq_count > 1 + MAX_NOTIFICATION_LINES) {
+		dev_warn(device,
+			"Too many notification interrupts; only the first %d will be used\n",
+			MAX_NOTIFICATION_LINES);
+	}
+
+	/* Find the TX and RX axon IRQs and the reset IRQ */
+	transport->tx_irq = irq_of_parse_and_map(axon_node, 0);
+	if (!transport->tx_irq) {
+		dev_err(device, "No TX IRQ\n");
+		return -ENODEV;
+	}
+
+	transport->rx_irq = irq_of_parse_and_map(axon_node, 1);
+	if (!transport->rx_irq) {
+		dev_err(device, "No RX IRQ\n");
+		return -ENODEV;
+	}
+
+	transport->reset_irq = irq_of_parse_and_map(vs_node, 0);
+	if (!transport->reset_irq) {
+		dev_err(device, "No reset IRQ\n");
+		return -ENODEV;
+	}
+	irqd = irq_get_irq_data(transport->reset_irq);
+	if (!irqd) {
+		dev_err(device, "No reset IRQ data\n");
+		return -ENODEV;
+	}
+	transport->reset_okl4_irq = irqd_to_hwirq(irqd);
+
+	/* Find the notification IRQs */
+	transport->notify_rx_nirqs = irq_count - 1;
+	for (i = 0; i < transport->notify_rx_nirqs; i++) {
+		transport->notify_irq[i] = irq_of_parse_and_map(vs_node,
+				i + 1);
+		if (!transport->notify_irq[i]) {
+			dev_err(device, "Bad notify IRQ\n");
+			return -ENODEV;
+		}
+	}
+
+	/* Find all outgoing virq lines */
+	irqlines = of_find_property(vs_node, "okl,interrupt-lines", NULL);
+	if (!irqlines || irqlines->length < sizeof(u32)) {
+		dev_err(device, "No VIRQ sources found\n");
+		return -ENODEV;
+	}
+	num_virq_lines = irqlines->length / sizeof(u32);
+
+	virq_node = of_parse_phandle(vs_node, "okl,interrupt-lines", 0);
+	if (!virq_node) {
+		dev_err(device, "No reset VIRQ line object\n");
+		return -ENODEV;
+	}
+	ret = of_property_read_u32(virq_node, "reg", &cap);
+	if (ret || cap == OKL4_KCAP_INVALID) {
+		dev_err(device, "Bad reset VIRQ line\n");
+		return -ENODEV;
+	}
+	transport->reset_cap = cap;
+
+	transport->notify_tx_nirqs = num_virq_lines - 1;
+	for (i = 0; i < transport->notify_tx_nirqs; i++) {
+		virq_node = of_parse_phandle(vs_node, "okl,interrupt-lines",
+				i + 1);
+		if (!virq_node) {
+			dev_err(device, "No notify VIRQ line object\n");
+			return -ENODEV;
+		}
+		ret = of_property_read_u32(virq_node, "reg", &cap);
+		if (ret || cap == OKL4_KCAP_INVALID) {
+			dev_err(device, "Bad notify VIRQ line\n");
+			return -ENODEV;
+		}
+		transport->notify_cap[i] = cap;
+	}
+
+	return 0;
+}
+
+static int transport_axon_request_irqs(struct vs_transport_axon *transport)
+{
+	struct device *device = transport->axon_dev;
+	int i, ret;
+
+	ret = devm_request_irq(device, transport->reset_irq,
+			transport_axon_reset_irq, IRQF_TRIGGER_HIGH,
+			dev_name(transport->axon_dev), transport);
+	if (ret < 0)
+		return ret;
+
+	ret = devm_request_irq(device, transport->tx_irq,
+			transport_axon_tx_irq, IRQF_TRIGGER_HIGH,
+			dev_name(transport->axon_dev), transport);
+	if (ret < 0)
+		return ret;
+
+	ret = devm_request_irq(device, transport->rx_irq,
+			transport_axon_rx_irq, IRQF_TRIGGER_HIGH,
+			dev_name(transport->axon_dev), transport);
+	if (ret < 0)
+		return ret;
+
+	for (i = 0; i < transport->notify_rx_nirqs; i++) {
+		ret = devm_request_irq(device, transport->notify_irq[i],
+				transport_axon_notify_virq, IRQF_TRIGGER_HIGH,
+				dev_name(transport->axon_dev), transport);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+
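+/*
+ * Allocate the shared RX/TX queue descriptor block, look up the OKL4
+ * physical segment backing it, attach that segment to both Axon endpoints,
+ * and set up the queue bookkeeping and the RX buffer DMA pool.
+ */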
+static int transport_axon_setup_descs(struct vs_transport_axon *transport)
+{
+	const int rx_buffer_order = ilog2(transport->msg_size +
+			sizeof(vs_service_id_t));
+	const size_t rx_queue_size = sizeof(*transport->rx) +
+		(sizeof(*transport->rx_descs) * transport->queue_size) +
+		(sizeof(*transport->rx_ptrs) * transport->queue_size);
+	const size_t tx_queue_size = sizeof(*transport->tx) +
+		(sizeof(*transport->tx_descs) * transport->queue_size);
+	const size_t queue_size = ALIGN(rx_queue_size,
+			__alignof__(*transport->tx)) + tx_queue_size;
+
+	struct _okl4_sys_mmu_lookup_pn_return lookup_return;
+	void *queue;
+	struct device_node *seg_node;
+	u32 seg_index;
+	okl4_kcap_t seg_cap;
+	okl4_error_t err;
+	dma_addr_t dma_handle;
+	const __be32 *prop;
+	int len, ret;
+
+	/*
+	 * Allocate memory for the queue descriptors.
+	 *
+	 * We allocate one block for both rx and tx because the minimum
+	 * allocation from dmam_alloc_coherent is usually a whole page.
+	 */
+	ret = -ENOMEM;
+	queue = dmam_alloc_coherent(transport->axon_dev, queue_size,
+			&dma_handle, GFP_KERNEL);
+	if (queue == NULL) {
+		dev_err(transport->axon_dev, "Failed to allocate %zd bytes for queue descriptors\n",
+				queue_size);
+		goto fail_alloc_dma;
+	}
+	memset(queue, 0, queue_size);
+
+	/*
+	 * Find the OKL4 physical segment object to attach to the axons.
+	 *
+	 * If the device has a CMA area, and the cell's memory segments have
+	 * not been split unnecessarily, then all allocations through the DMA
+	 * API for this device will be within a single segment. So, we can
+	 * simply look up the segment that contains the queue.
+	 *
+	 * The location and size of the CMA area can be configured elsewhere.
+	 * In 3.12 and later a device-specific area can be reserved via the
+	 * standard device tree reserved-memory properties. Otherwise, the
+	 * global area will be used, which has a size configurable on the
+	 * kernel command line and defaults to 16MB.
+	 */
+
+	/* Locate the physical segment */
+	ret = -ENODEV;
+	lookup_return = _okl4_sys_mmu_lookup_pn(okl4_mmu_cap,
+			dma_handle >> OKL4_DEFAULT_PAGEBITS, -1);
+	err = okl4_mmu_lookup_index_geterror(&lookup_return.segment_index);
+	if (err == OKL4_ERROR_NOT_IN_SEGMENT) {
+		dev_err(transport->axon_dev,
+				"No segment found for DMA address %pK (%#llx)!\n",
+				queue, (unsigned long long)dma_handle);
+		goto fail_lookup_segment;
+	}
+	if (err != OKL4_OK) {
+		dev_err(transport->axon_dev,
+				"Could not look up segment for DMA address %pK (%#llx): OKL4 error %d\n",
+				queue, (unsigned long long)dma_handle,
+				(int)err);
+		goto fail_lookup_segment;
+	}
+	seg_index = okl4_mmu_lookup_index_getindex(&lookup_return.segment_index);
+
+	dev_dbg(transport->axon_dev, "lookup pn %#lx got error %ld segment %ld count %lu offset %#lx\n",
+			(long)(dma_handle >> OKL4_DEFAULT_PAGEBITS),
+			(long)err, (long)seg_index,
+			(unsigned long)lookup_return.count_pn,
+			(unsigned long)lookup_return.offset_pn);
+
+	/* Locate the physical segment's OF node */
+	for_each_compatible_node(seg_node, NULL, "okl,microvisor-segment") {
+		u32 attach_index;
+		ret = of_property_read_u32(seg_node, "okl,segment-attachment",
+				&attach_index);
+		if (!ret && attach_index == seg_index)
+			break;
+	}
+	if (seg_node == NULL) {
+		ret = -ENXIO;
+		dev_err(transport->axon_dev, "No physical segment found for %pK\n",
+				queue);
+		goto fail_lookup_segment;
+	}
+
+	/* Determine the physical segment's cap */
+	prop = of_get_property(seg_node, "reg", &len);
+	ret = !!prop ? 0 : -EPERM;
+	if (!ret)
+		seg_cap = of_read_number(prop, of_n_addr_cells(seg_node));
+	if (!ret && seg_cap == OKL4_KCAP_INVALID)
+		ret = -ENXIO;
+	if (ret < 0) {
+		dev_err(transport->axon_dev, "missing physical-segment cap\n");
+		goto fail_lookup_segment;
+	}
+	transport->segment = seg_cap;
+	transport->segment_base =
+		(round_down(dma_handle >> OKL4_DEFAULT_PAGEBITS,
+			    lookup_return.count_pn) -
+		 lookup_return.offset_pn) << OKL4_DEFAULT_PAGEBITS;
+
+	dev_dbg(transport->axon_dev, "physical segment cap is %#lx, base %#llx\n",
+			(unsigned long)transport->segment,
+			(unsigned long long)transport->segment_base);
+
+	/* Attach the segment to the Axon endpoints */
+	err = _okl4_sys_axon_set_send_segment(transport->tx_cap,
+			transport->segment, transport->segment_base);
+	if (err != OKL4_OK) {
+		dev_err(transport->axon_dev, "TX attach failed: %d\n",
+				(int)err);
+		ret = okl4_error_to_errno(err);
+		goto fail_attach;
+	}
+
+	err = _okl4_sys_axon_set_recv_segment(transport->rx_cap,
+			transport->segment, transport->segment_base);
+	if (err != OKL4_OK) {
+		dev_err(transport->axon_dev, "RX attach failed: %d\n",
+				(int)err);
+		ret = okl4_error_to_errno(err);
+		goto fail_attach;
+	}
+
+	/* Array of pointers to the source TX pool for each outgoing buffer. */
+	transport->tx_pools = devm_kzalloc(transport->axon_dev,
+			sizeof(*transport->tx_pools) * transport->queue_size,
+			GFP_KERNEL);
+	if (!transport->tx_pools) {
+		ret = -ENOMEM;
+		goto fail_alloc_tx_pools;
+	}
+
+	/* Set up the rx queue descriptors. */
+	transport->rx = queue;
+	transport->rx_phys = dma_handle;
+	transport->rx_size = rx_queue_size;
+	transport->rx_descs = (void *)(transport->rx + 1);
+	transport->rx_ptrs = (void *)(transport->rx_descs + transport->queue_size);
+	okl4_axon_queue_size_setallocorder(&transport->rx->queue_sizes[0],
+			rx_buffer_order);
+	transport->rx->queues[0].queue_offset = sizeof(*transport->rx);
+	transport->rx->queues[0].entries = transport->queue_size;
+	transport->rx->queues[0].uptr = 0;
+	transport->rx->queues[0].kptr = 0;
+	transport->rx_uptr_allocated = 0;
+
+	/* Set up the tx queue descriptors. */
+	transport->tx = queue + ALIGN(rx_queue_size,
+			__alignof__(*transport->tx));
+	transport->tx_phys = dma_handle + ((void *)transport->tx - queue);
+	transport->tx_size = tx_queue_size;
+	transport->tx_descs = (void *)(transport->tx + 1);
+	transport->tx->queues[0].queue_offset = sizeof(*transport->tx);
+	transport->tx->queues[0].entries = transport->queue_size;
+	transport->tx->queues[0].uptr = 0;
+	transport->tx->queues[0].kptr = 0;
+	transport->tx_uptr_freed = 0;
+
+	/* Create a DMA pool for the RX buffers. */
+	transport->rx_pool = dmam_pool_create("vs_axon_rx_pool",
+			transport->axon_dev, 1 << rx_buffer_order,
+			max(dma_get_cache_alignment(),
+				1 << OKL4_PRESHIFT_LADDR_AXON_DATA_INFO), 0);
+	if (!transport->rx_pool) {
+		ret = -ENOMEM;
+		goto fail_alloc_tx_pools;
+	}
+
+	return 0;
+
+fail_alloc_tx_pools:
+fail_attach:
+fail_lookup_segment:
+	dmam_free_coherent(transport->axon_dev, queue_size, queue, dma_handle);
+fail_alloc_dma:
+	return ret;
+}
+
+static void transport_axon_free_descs(struct vs_transport_axon *transport)
+{
+	int i;
+
+	tasklet_disable(&transport->rx_tasklet);
+	tasklet_kill(&transport->rx_tasklet);
+
+	tasklet_disable(&transport->tx_tasklet);
+	tasklet_kill(&transport->tx_tasklet);
+
+	cancel_delayed_work_sync(&transport->free_bufs_work);
+
+	transport->tx = NULL;
+	transport->tx_descs = NULL;
+
+	for (i = 0; i < transport->rx->queues[0].entries; i++) {
+		struct okl4_axon_queue_entry *desc = &transport->rx_descs[i];
+
+		if (okl4_axon_data_info_getusr(&desc->info)) {
+			void *ptr = transport->rx_ptrs[i];
+			dma_addr_t dma = okl4_axon_data_info_getladdr(&desc->info);
+			dma_pool_free(transport->rx_pool, ptr, dma);
+		}
+	}
+
+	transport->rx = NULL;
+	transport->rx_descs = NULL;
+	transport->rx_ptrs = NULL;
+
+	/* Let devm free the queues so we don't have to keep the dma handle */
+}
+
+static int transport_axon_probe(struct platform_device *dev)
+{
+	struct vs_transport_axon *priv = NULL;
+	u32 cap[2];
+	u32 queue_size, msg_size;
+	int ret, i;
+	const char* name;
+
+	if (!dev_get_cma_area(&dev->dev) && !okl4_single_physical_segment) {
+		dev_err(&dev->dev, "Multiple physical segments, but CMA is disabled\n");
+		return -ENOSYS;
+	}
+
+	dev->dev.coherent_dma_mask = ~(u64)0;
+	dev->dev.archdata.dma_ops = &axon_dma_ops;
+
+	priv = devm_kzalloc(&dev->dev, sizeof(struct vs_transport_axon) +
+			sizeof(unsigned long), GFP_KERNEL);
+	if (priv == NULL) {
+		dev_err(&dev->dev, "create transport object failed\n");
+		ret = -ENOMEM;
+		goto err_alloc_priv;
+	}
+	dev_set_drvdata(&dev->dev, priv);
+
+	priv->of_node = of_get_child_by_name(dev->dev.of_node,
+			"virtual-session");
+	if ((!priv->of_node) ||
+			(!of_device_is_compatible(priv->of_node,
+					"okl,virtual-session"))) {
+		dev_err(&dev->dev, "missing virtual-session node\n");
+		ret = -ENODEV;
+		goto error_of_node;
+	}
+
+	name = dev->dev.of_node->full_name;
+	of_property_read_string(dev->dev.of_node, "label", &name);
+
+	if (of_property_read_bool(priv->of_node, "okl,is-client")) {
+		priv->is_server = false;
+	} else if (of_property_read_bool(priv->of_node, "okl,is-server")) {
+		priv->is_server = true;
+	} else {
+		dev_err(&dev->dev, "virtual-session node is not marked as client or server\n");
+		ret = -ENODEV;
+		goto error_of_node;
+	}
+
+	priv->transport.vt = &tvt;
+	priv->transport.type = "microvisor";
+	priv->axon_dev = &dev->dev;
+
+	/* Read the Axon caps */
+	ret = of_property_read_u32_array(dev->dev.of_node, "reg", cap, 2);
+	if (ret < 0 || cap[0] == OKL4_KCAP_INVALID ||
+			cap[1] == OKL4_KCAP_INVALID) {
+		dev_err(&dev->dev, "missing axon endpoint caps\n");
+		ret = -ENODEV;
+		goto error_of_node;
+	}
+	priv->tx_cap = cap[0];
+	priv->rx_cap = cap[1];
+
+	/* Set transport properties; default to a 64KB buffer */
+	queue_size = 16;
+	(void)of_property_read_u32(priv->of_node, "okl,queue-length",
+			&queue_size);
+	priv->queue_size = max((size_t)queue_size, MIN_QUEUE_SIZE);
+
+	msg_size = PAGE_SIZE - sizeof(vs_service_id_t);
+	(void)of_property_read_u32(priv->of_node, "okl,message-size",
+			&msg_size);
+	priv->msg_size = max((size_t)msg_size, MIN_MSG_SIZE);
+
+	/*
+	 * Since the Axon API requires received message size limits to be
+	 * powers of two, we must round up the message size (including the
+	 * space reserved for the service ID).
+	 */
+	priv->msg_size = roundup_pow_of_two(priv->msg_size +
+			sizeof(vs_service_id_t)) - sizeof(vs_service_id_t);
+	if (priv->msg_size != msg_size)
+		dev_info(&dev->dev, "message size rounded up from %zd to %zd\n",
+				(size_t)msg_size, priv->msg_size);
+
+	INIT_LIST_HEAD(&priv->tx_queue);
+
+	/* Initialise the activation state, tasklets, and RX retry timer */
+	spin_lock_init(&priv->readiness_lock);
+	priv->readiness = VS_TRANSPORT_INIT;
+
+	tasklet_init(&priv->rx_tasklet, transport_rx_tasklet,
+		(unsigned long)priv);
+	tasklet_init(&priv->tx_tasklet, transport_tx_tasklet,
+		(unsigned long)priv);
+
+	INIT_DELAYED_WORK(&priv->free_bufs_work, transport_free_bufs_work);
+	spin_lock_init(&priv->rx_alloc_lock);
+	priv->rx_alloc_extra = 0;
+	INIT_LIST_HEAD(&priv->rx_freelist);
+
+	setup_timer(&priv->rx_retry_timer, transport_rx_retry_timer,
+			(unsigned long)priv);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0)
+	set_timer_slack(&priv->rx_retry_timer, HZ);
+#endif
+
+	/* Keep RX disabled until the core service is ready. */
+	tasklet_disable(&priv->rx_tasklet);
+
+	ret = transport_axon_probe_virqs(priv);
+	if (ret < 0)
+		goto err_probe_virqs;
+
+	if (priv->notify_rx_nirqs) {
+		ret = alloc_notify_info(&dev->dev, &priv->transport.notify_info,
+				&priv->transport.notify_info_size,
+				priv->notify_rx_nirqs);
+		if (ret < 0) {
+			dev_err(&dev->dev, "Alloc notify_info failed\n");
+			goto err_alloc_notify;
+		}
+	} else {
+		priv->transport.notify_info = NULL;
+		priv->transport.notify_info_size = 0;
+	}
+
+	priv->free_bufs_pool = transport_axon_init_tx_pool(priv, priv->msg_size,
+			FREE_BUFS_QUOTA);
+	if (IS_ERR(priv->free_bufs_pool)) {
+		ret = PTR_ERR(priv->free_bufs_pool);
+		goto err_init_free_bufs_pool;
+	}
+
+	ret = transport_axon_setup_descs(priv);
+	if (ret < 0)
+		goto err_setup_descs;
+
+	/* Allocate RX buffers for free bufs messages */
+	for (i = 0; i < FREE_BUFS_QUOTA; i++) {
+		dma_addr_t laddr;
+		struct vs_axon_rx_freelist_entry *buf =
+			dma_pool_alloc(priv->rx_pool, GFP_KERNEL, &laddr);
+		if (!buf) {
+			ret = -ENOMEM;
+			goto err_alloc_rx_free_bufs;
+		}
+		buf->laddr = laddr;
+
+		spin_lock_irq(&priv->rx_alloc_lock);
+		list_add_tail(&buf->list, &priv->rx_freelist);
+		spin_unlock_irq(&priv->rx_alloc_lock);
+	}
+
+	/* Set up the session device */
+	priv->session_dev = vs_session_register(&priv->transport, &dev->dev,
+			priv->is_server, name);
+	if (IS_ERR(priv->session_dev)) {
+		ret = PTR_ERR(priv->session_dev);
+		dev_err(&dev->dev, "failed to register session: %d\n", ret);
+		goto err_session_register;
+	}
+
+	/*
+	 * Start the core service. Note that it can't actually communicate
+	 * until the initial reset completes.
+	 */
+	vs_session_start(priv->session_dev);
+
+	/*
+	 * Reset the transport. This will also set the Axons' segment
+	 * attachments, and eventually the Axons' queue pointers (once the
+	 * session marks the transport ready).
+	 */
+	transport_reset(&priv->transport);
+
+	/*
+	 * We're ready to start handling IRQs at this point, so register the
+	 * handlers.
+	 */
+	ret = transport_axon_request_irqs(priv);
+	if (ret < 0)
+		goto err_irq_register;
+
+	return 0;
+
+err_irq_register:
+	vs_session_unregister(priv->session_dev);
+err_session_register:
+err_alloc_rx_free_bufs:
+	transport_axon_free_descs(priv);
+err_setup_descs:
+	transport_axon_put_tx_pool(priv->free_bufs_pool);
+err_init_free_bufs_pool:
+	if (priv->transport.notify_info)
+		devm_kfree(&dev->dev, priv->transport.notify_info);
+err_alloc_notify:
+err_probe_virqs:
+	del_timer_sync(&priv->rx_retry_timer);
+	tasklet_kill(&priv->rx_tasklet);
+	tasklet_kill(&priv->tx_tasklet);
+	cancel_delayed_work_sync(&priv->free_bufs_work);
+error_of_node:
+	devm_kfree(&dev->dev, priv);
+err_alloc_priv:
+	return ret;
+}
+
+static int transport_axon_remove(struct platform_device *dev)
+{
+	struct vs_transport_axon *priv = dev_get_drvdata(&dev->dev);
+	int i;
+
+	for (i = 0; i < priv->notify_rx_nirqs; i++)
+		devm_free_irq(&dev->dev, priv->notify_irq[i], priv);
+
+	devm_free_irq(&dev->dev, priv->rx_irq, priv);
+	irq_dispose_mapping(priv->rx_irq);
+	devm_free_irq(&dev->dev, priv->tx_irq, priv);
+	irq_dispose_mapping(priv->tx_irq);
+	devm_free_irq(&dev->dev, priv->reset_irq, priv);
+	irq_dispose_mapping(priv->reset_irq);
+
+	del_timer_sync(&priv->rx_retry_timer);
+	tasklet_kill(&priv->rx_tasklet);
+	tasklet_kill(&priv->tx_tasklet);
+	cancel_delayed_work_sync(&priv->free_bufs_work);
+
+	priv->readiness = VS_TRANSPORT_SHUTDOWN;
+	vs_session_unregister(priv->session_dev);
+	WARN_ON(priv->readiness != VS_TRANSPORT_SHUTDOWN);
+
+	transport_axon_free_descs(priv);
+	transport_axon_put_tx_pool(priv->free_bufs_pool);
+
+	if (priv->transport.notify_info)
+		devm_kfree(&dev->dev, priv->transport.notify_info);
+
+	free_tx_mbufs(priv);
+
+	flush_workqueue(work_queue);
+
+	while (!list_empty(&priv->rx_freelist)) {
+		struct vs_axon_rx_freelist_entry *buf;
+		buf = list_first_entry(&priv->rx_freelist,
+				struct vs_axon_rx_freelist_entry, list);
+		list_del(&buf->list);
+		dma_pool_free(priv->rx_pool, buf, buf->laddr);
+	}
+
+	devm_kfree(&dev->dev, priv);
+	return 0;
+}
+
+static const struct of_device_id transport_axon_of_match[] = {
+	{ .compatible = "okl,microvisor-axon-transport", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, transport_axon_of_match);
+
+static struct platform_driver transport_axon_driver = {
+	.probe		= transport_axon_probe,
+	.remove		= transport_axon_remove,
+	.driver = {
+		.name		= DRIVER_NAME,
+		.owner		= THIS_MODULE,
+		.bus		= &platform_bus_type,
+		.of_match_table = of_match_ptr(transport_axon_of_match),
+	},
+};
+
+static int __init vs_transport_axon_init(void)
+{
+	int ret;
+	okl4_error_t err;
+	struct device_node *cpus;
+	struct zone *zone;
+	struct _okl4_sys_mmu_lookup_pn_return lookup_return;
+	u32 last_seen_attachment = -1;
+	bool first_attachment;
+
+	printk(KERN_INFO "Virtual Services transport driver for OKL4 Axons\n");
+
+	/* Allocate the Axon cleanup workqueue */
+	work_queue = alloc_workqueue("axon_cleanup", 0, 0);
+	if (!work_queue) {
+		ret = -ENOMEM;
+		goto fail_create_workqueue;
+	}
+
+	/* Locate the MMU capability, needed for lookups */
+	cpus = of_find_node_by_path("/cpus");
+	if (IS_ERR_OR_NULL(cpus)) {
+		ret = -EINVAL;
+		goto fail_mmu_cap;
+	}
+	ret = of_property_read_u32(cpus, "okl,vmmu-capability", &okl4_mmu_cap);
+	if (ret) {
+		goto fail_mmu_cap;
+	}
+	if (okl4_mmu_cap == OKL4_KCAP_INVALID) {
+		printk(KERN_ERR "%s: OKL4 MMU capability not found\n", __func__);
+		ret = -EPERM;
+		goto fail_mmu_cap;
+	}
+
+	/*
+	 * Determine whether there are multiple OKL4 physical memory segments
+	 * in this Cell. If so, every transport device must have a valid CMA
+	 * region, to guarantee that its buffer allocations all come from the
+	 * segment that is attached to the axon endpoints.
+	 *
+	 * We assume that each zone is contiguously mapped in stage 2 with a
+	 * constant physical-to-IPA offset, typically 0. The weaver won't
+	 * violate this assumption for Linux (or other HLOS) guests unless it
+	 * is explicitly told to.
+	 */
+	okl4_single_physical_segment = true;
+	first_attachment = true;
+	for_each_zone(zone) {
+		u32 attachment;
+
+		/* We only care about zones that the page allocator is using */
+		if (!zone->managed_pages)
+			continue;
+
+		/* Find the segment at the start of the zone */
+		lookup_return = _okl4_sys_mmu_lookup_pn(okl4_mmu_cap,
+				zone->zone_start_pfn, -1);
+		err = okl4_mmu_lookup_index_geterror(
+				&lookup_return.segment_index);
+		if (err != OKL4_OK) {
+			printk(KERN_WARNING "%s: Unable to determine physical segment count, assuming >1\n",
+					__func__);
+			okl4_single_physical_segment = false;
+			break;
+		}
+		attachment = okl4_mmu_lookup_index_getindex(
+				&lookup_return.segment_index);
+
+		if (first_attachment) {
+			last_seen_attachment = attachment;
+			first_attachment = false;
+		} else if (last_seen_attachment != attachment) {
+			okl4_single_physical_segment = false;
+			break;
+		}
+
+		/* Find the segment at the end of the zone */
+		lookup_return = _okl4_sys_mmu_lookup_pn(okl4_mmu_cap,
+				zone_end_pfn(zone) - 1, -1);
+		err = okl4_mmu_lookup_index_geterror(
+				&lookup_return.segment_index);
+		if (err != OKL4_OK) {
+			printk(KERN_WARNING "%s: Unable to determine physical segment count, assuming >1\n",
+					__func__);
+			okl4_single_physical_segment = false;
+			break;
+		}
+		attachment = okl4_mmu_lookup_index_getindex(
+				&lookup_return.segment_index);
+
+		/* Check that it's still the same segment */
+		if (last_seen_attachment != attachment) {
+			okl4_single_physical_segment = false;
+			break;
+		}
+	}
+
+#ifdef DEBUG
+	printk(KERN_DEBUG "%s: physical segment count %s\n", __func__,
+			okl4_single_physical_segment ? "1" : ">1");
+#endif
+
+	mbuf_cache = KMEM_CACHE(vs_mbuf_axon, 0UL);
+	if (!mbuf_cache) {
+		ret = -ENOMEM;
+		goto kmem_cache_failed;
+	}
+
+	ret = platform_driver_register(&transport_axon_driver);
+	if (ret)
+		goto register_plat_driver_failed;
+
+	return ret;
+
+register_plat_driver_failed:
+	kmem_cache_destroy(mbuf_cache);
+	mbuf_cache = NULL;
+kmem_cache_failed:
+fail_mmu_cap:
+	if (work_queue)
+		destroy_workqueue(work_queue);
+fail_create_workqueue:
+	return ret;
+}
+
+static void __exit vs_transport_axon_exit(void)
+{
+	platform_driver_unregister(&transport_axon_driver);
+
+	rcu_barrier();
+
+	if (mbuf_cache)
+		kmem_cache_destroy(mbuf_cache);
+	mbuf_cache = NULL;
+
+	if (work_queue)
+		destroy_workqueue(work_queue);
+}
+
+module_init(vs_transport_axon_init);
+module_exit(vs_transport_axon_exit);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR(DRIVER_AUTHOR);
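The init path above unwinds partially completed setup through a chain of fail_* labels, releasing only what was actually acquired before the failure. A minimal userspace sketch of that goto-cleanup shape (the resource names below are illustrative, not part of the driver):

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for the resources the real init acquires. */
static void *acquire(const char *what)
{
	void *p = malloc(16);
	if (p)
		printf("acquired %s\n", what);
	return p;
}

static int demo_init(void)
{
	void *queue, *cache;
	int ret;

	queue = acquire("workqueue");
	if (!queue) {
		ret = -1;
		goto fail_queue;
	}

	cache = acquire("kmem cache");
	if (!cache) {
		ret = -1;
		goto fail_cache;
	}

	return 0;		/* success: both resources stay allocated */

fail_cache:
	free(queue);		/* undo only what was set up before the failure */
fail_queue:
	return ret;
}

int main(void)
{
	return demo_init() ? EXIT_FAILURE : EXIT_SUCCESS;
}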
diff --git a/drivers/xen/cpu_hotplug.c b/drivers/xen/cpu_hotplug.c
index 5676aef..f4e59c4 100644
--- a/drivers/xen/cpu_hotplug.c
+++ b/drivers/xen/cpu_hotplug.c
@@ -18,15 +18,16 @@
 
 static void disable_hotplug_cpu(int cpu)
 {
-	if (cpu_online(cpu)) {
-		lock_device_hotplug();
+	if (!cpu_is_hotpluggable(cpu))
+		return;
+	lock_device_hotplug();
+	if (cpu_online(cpu))
 		device_offline(get_cpu_device(cpu));
-		unlock_device_hotplug();
-	}
-	if (cpu_present(cpu))
+	if (!cpu_online(cpu) && cpu_present(cpu)) {
 		xen_arch_unregister_cpu(cpu);
-
-	set_cpu_present(cpu, false);
+		set_cpu_present(cpu, false);
+	}
+	unlock_device_hotplug();
 }
 
 static int vcpu_online(unsigned int cpu)
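The reworked disable_hotplug_cpu() above takes lock_device_hotplug() once and holds it across both the online check and the unregister, instead of testing CPU state outside the lock. The same check-then-act-under-one-lock shape in a standalone POSIX-threads sketch (struct and field names are invented for illustration):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct cpu_state {
	pthread_mutex_t lock;
	bool online;
	bool present;
};

/* Both tests and both state changes happen under the same lock, so no other
 * thread can flip 'online' between the check and the action. */
static void take_offline(struct cpu_state *cpu)
{
	pthread_mutex_lock(&cpu->lock);
	if (cpu->online)
		cpu->online = false;
	if (!cpu->online && cpu->present)
		cpu->present = false;
	pthread_mutex_unlock(&cpu->lock);
}

int main(void)
{
	struct cpu_state cpu = { PTHREAD_MUTEX_INITIALIZER, true, true };

	take_offline(&cpu);
	printf("online=%d present=%d\n", cpu.online, cpu.present);
	return 0;
}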
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 1435d8c..4b0cc9d 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -139,7 +139,7 @@
 		clear_evtchn_to_irq_row(row);
 	}
 
-	evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)] = irq;
+	evtchn_to_irq[row][col] = irq;
 	return 0;
 }
 
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index 7abaaa5..abd49bc 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -282,9 +282,11 @@
 		/*
 		 * The Xenstore watch fires directly after registering it and
 		 * after a suspend/resume cycle. So ENOENT is no error but
-		 * might happen in those cases.
+		 * might happen in those cases. ERANGE is observed when we get
+		 * an empty value (''), this happens when we acknowledge the
+		 * request by writing '\0' below.
 		 */
-		if (err != -ENOENT)
+		if (err != -ENOENT && err != -ERANGE)
 			pr_err("Error %d reading sysrq code in control/sysrq\n",
 			       err);
 		xenbus_transaction_end(xbt, 1);
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
index a0b3e7d..211ac47 100644
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -101,9 +101,6 @@
 	case SFM_LESSTHAN:
 		*target = '<';
 		break;
-	case SFM_SLASH:
-		*target = '\\';
-		break;
 	case SFM_SPACE:
 		*target = ' ';
 		break;
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 8407b07..741b83c 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -577,10 +577,15 @@
 	}
 
 	count = 0;
+	/*
+	 * We know that all the name entries in the protocols array
+	 * are short (< 16 bytes anyway) and are NUL terminated.
+	 */
 	for (i = 0; i < CIFS_NUM_PROT; i++) {
-		strncpy(pSMB->DialectsArray+count, protocols[i].name, 16);
-		count += strlen(protocols[i].name) + 1;
-		/* null at end of source and target buffers anyway */
+		size_t len = strlen(protocols[i].name) + 1;
+
+		memcpy(pSMB->DialectsArray+count, protocols[i].name, len);
+		count += len;
 	}
 	inc_rfc1001_len(pSMB, count);
 	pSMB->ByteCount = cpu_to_le16(count);
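The cifssmb.c hunk replaces a fixed-size strncpy() with a memcpy() of strlen()+1 bytes, copying exactly the NUL-terminated name and nothing beyond it. A small userspace sketch of the same packing loop (the buffer size and the sample dialect strings are chosen only for illustration):

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Short, NUL-terminated names, as the comment in the hunk assumes. */
	static const char *const names[] = { "NT LM 0.12", "SMB 2.002", "SMB 2.???" };
	char buf[64];
	size_t count = 0;

	for (size_t i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
		size_t len = strlen(names[i]) + 1;	/* include the NUL */

		memcpy(buf + count, names[i], len);
		count += len;
	}

	printf("packed %zu bytes\n", count);
	return 0;
}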
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 323d8e3..50559a8 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -406,9 +406,17 @@
 			(struct smb_com_transaction_change_notify_rsp *)buf;
 		struct file_notify_information *pnotify;
 		__u32 data_offset = 0;
+		size_t len = srv->total_read - sizeof(pSMBr->hdr.smb_buf_length);
+
 		if (get_bcc(buf) > sizeof(struct file_notify_information)) {
 			data_offset = le32_to_cpu(pSMBr->DataOffset);
 
+			if (data_offset >
+			    len - sizeof(struct file_notify_information)) {
+				cifs_dbg(FYI, "invalid data_offset %u\n",
+					 data_offset);
+				return true;
+			}
 			pnotify = (struct file_notify_information *)
 				((char *)&pSMBr->hdr.Protocol + data_offset);
 			cifs_dbg(FYI, "dnotify on %s Action: 0x%x\n",
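The misc.c change validates DataOffset against the number of bytes actually read before using it to form a pointer into the response. The same guard in miniature, as a hedged userspace sketch (the struct layout and field names are invented, not the SMB wire format):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct notify_info {
	uint32_t next_offset;
	uint32_t action;
};

/* Reject any offset that would place the struct past the end of the buffer. */
static const struct notify_info *notify_at(const uint8_t *buf, size_t len,
					   uint32_t data_offset)
{
	if (len < sizeof(struct notify_info) ||
	    data_offset > len - sizeof(struct notify_info))
		return NULL;	/* struct would extend past the end of the buffer */
	return (const struct notify_info *)(buf + data_offset);
}

int main(void)
{
	uint8_t buf[32] = { 0 };

	printf("in bounds:     %p\n", (void *)notify_at(buf, sizeof(buf), 8));
	printf("out of bounds: %p\n", (void *)notify_at(buf, sizeof(buf), 30));
	return 0;
}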
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 68622f1..08c1c86 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -989,7 +989,7 @@
 	}
 
 	srch_inf->entries_in_buffer = 0;
-	srch_inf->index_of_last_entry = 0;
+	srch_inf->index_of_last_entry = 2;
 
 	rc = SMB2_query_directory(xid, tcon, fid->persistent_fid,
 				  fid->volatile_fid, 0, srch_inf);
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index e8b3650..e16bc4c 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -74,7 +74,7 @@
 	else if (unlikely(rlen < EXT4_DIR_REC_LEN(de->name_len)))
 		error_msg = "rec_len is too small for name_len";
 	else if (unlikely(((char *) de - buf) + rlen > size))
-		error_msg = "directory entry across range";
+		error_msg = "directory entry overrun";
 	else if (unlikely(le32_to_cpu(de->inode) >
 			le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count)))
 		error_msg = "inode out of bounds";
@@ -83,18 +83,16 @@
 
 	if (filp)
 		ext4_error_file(filp, function, line, bh->b_blocknr,
-				"bad entry in directory: %s - offset=%u(%u), "
-				"inode=%u, rec_len=%d, name_len=%d",
-				error_msg, (unsigned) (offset % size),
-				offset, le32_to_cpu(de->inode),
-				rlen, de->name_len);
+				"bad entry in directory: %s - offset=%u, "
+				"inode=%u, rec_len=%d, name_len=%d, size=%d",
+				error_msg, offset, le32_to_cpu(de->inode),
+				rlen, de->name_len, size);
 	else
 		ext4_error_inode(dir, function, line, bh->b_blocknr,
-				"bad entry in directory: %s - offset=%u(%u), "
-				"inode=%u, rec_len=%d, name_len=%d",
-				error_msg, (unsigned) (offset % size),
-				offset, le32_to_cpu(de->inode),
-				rlen, de->name_len);
+				"bad entry in directory: %s - offset=%u, "
+				"inode=%u, rec_len=%d, name_len=%d, size=%d",
+				 error_msg, offset, le32_to_cpu(de->inode),
+				 rlen, de->name_len, size);
 
 	return 1;
 }
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 74e831f..f901b643 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -1768,6 +1768,7 @@
 {
 	int err, inline_size;
 	struct ext4_iloc iloc;
+	size_t inline_len;
 	void *inline_pos;
 	unsigned int offset;
 	struct ext4_dir_entry_2 *de;
@@ -1795,8 +1796,9 @@
 		goto out;
 	}
 
+	inline_len = ext4_get_inline_size(dir);
 	offset = EXT4_INLINE_DOTDOT_SIZE;
-	while (offset < dir->i_size) {
+	while (offset < inline_len) {
 		de = ext4_get_inline_entry(dir, &iloc, offset,
 					   &inline_pos, &inline_size);
 		if (ext4_check_dir_entry(dir, NULL, de,
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
index d89754e..c2e830a 100644
--- a/fs/ext4/mmp.c
+++ b/fs/ext4/mmp.c
@@ -48,7 +48,6 @@
 	 */
 	sb_start_write(sb);
 	ext4_mmp_csum_set(sb, mmp);
-	mark_buffer_dirty(bh);
 	lock_buffer(bh);
 	bh->b_end_io = end_buffer_write_sync;
 	get_bh(bh);
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 09020e4..9bad755 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -3434,6 +3434,12 @@
 	int credits;
 	u8 old_file_type;
 
+	if (new.inode && new.inode->i_nlink == 0) {
+		EXT4_ERROR_INODE(new.inode,
+				 "target of rename is already freed");
+		return -EFSCORRUPTED;
+	}
+
 	if ((ext4_test_inode_flag(new_dir, EXT4_INODE_PROJINHERIT)) &&
 	    (!projid_eq(EXT4_I(new_dir)->i_projid,
 			EXT4_I(old_dentry->d_inode)->i_projid)))
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index eb720d9..1da301e 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -18,6 +18,7 @@
 
 int ext4_resize_begin(struct super_block *sb)
 {
+	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	int ret = 0;
 
 	if (!capable(CAP_SYS_RESOURCE))
@@ -28,7 +29,7 @@
          * because the user tools have no way of handling this.  Probably a
          * bad time to do it anyways.
          */
-	if (EXT4_SB(sb)->s_sbh->b_blocknr !=
+	if (EXT4_B2C(sbi, sbi->s_sbh->b_blocknr) !=
 	    le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
 		ext4_warning(sb, "won't resize using backup superblock at %llu",
 			(unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
@@ -1954,6 +1955,26 @@
 		}
 	}
 
+	/*
+	 * Make sure the last group has enough space so that it's
+	 * guaranteed to have enough space for all metadata blocks
+	 * that it might need to hold.  (We might not need to store
+	 * the inode table blocks in the last block group, but there
+	 * will be cases where this might be needed.)
+	 */
+	if ((ext4_group_first_block_no(sb, n_group) +
+	     ext4_group_overhead_blocks(sb, n_group) + 2 +
+	     sbi->s_itb_per_group + sbi->s_cluster_ratio) >= n_blocks_count) {
+		n_blocks_count = ext4_group_first_block_no(sb, n_group);
+		n_group--;
+		n_blocks_count_retry = 0;
+		if (resize_inode) {
+			iput(resize_inode);
+			resize_inode = NULL;
+		}
+		goto retry;
+	}
+
 	/* extend the last group */
 	if (n_group == o_group)
 		add = n_blocks_count - o_blocks_count;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 1a2c223..031e43d 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -2019,6 +2019,8 @@
 		SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb);
 	if (test_opt(sb, DATA_ERR_ABORT))
 		SEQ_OPTS_PUTS("data_err=abort");
+	if (DUMMY_ENCRYPTION_ENABLED(sbi))
+		SEQ_OPTS_PUTS("test_dummy_encryption");
 
 	ext4_show_quota_options(seq, sb);
 	return 0;
@@ -4193,11 +4195,13 @@
 	block = ext4_count_free_clusters(sb);
 	ext4_free_blocks_count_set(sbi->s_es, 
 				   EXT4_C2B(sbi, block));
+	ext4_superblock_csum_set(sb);
 	err = percpu_counter_init(&sbi->s_freeclusters_counter, block,
 				  GFP_KERNEL);
 	if (!err) {
 		unsigned long freei = ext4_count_free_inodes(sb);
 		sbi->s_es->s_free_inodes_count = cpu_to_le32(freei);
+		ext4_superblock_csum_set(sb);
 		err = percpu_counter_init(&sbi->s_freeinodes_counter, freei,
 					  GFP_KERNEL);
 	}
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index fdcbe0f..7d6da09 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -209,12 +209,12 @@
 {
 	int error;
 
-	if (buffer_verified(bh))
-		return 0;
-
 	if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
 	    BHDR(bh)->h_blocks != cpu_to_le32(1))
 		return -EFSCORRUPTED;
+	if (buffer_verified(bh))
+		return 0;
+
 	if (!ext4_xattr_block_csum_verify(inode, bh))
 		return -EFSBADCRC;
 	error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size,
@@ -645,14 +645,20 @@
 }
 
 static int
-ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s)
+ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s,
+		     struct inode *inode)
 {
-	struct ext4_xattr_entry *last;
+	struct ext4_xattr_entry *last, *next;
 	size_t free, min_offs = s->end - s->base, name_len = strlen(i->name);
 
 	/* Compute min_offs and last. */
 	last = s->first;
-	for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
+	for (; !IS_LAST_ENTRY(last); last = next) {
+		next = EXT4_XATTR_NEXT(last);
+		if ((void *)next >= s->end) {
+			EXT4_ERROR_INODE(inode, "corrupted xattr entries");
+			return -EFSCORRUPTED;
+		}
 		if (last->e_value_size) {
 			size_t offs = le16_to_cpu(last->e_value_offs);
 			if (offs < min_offs)
@@ -834,7 +840,7 @@
 			mb_cache_entry_delete_block(ext4_mb_cache, hash,
 						    bs->bh->b_blocknr);
 			ea_bdebug(bs->bh, "modifying in-place");
-			error = ext4_xattr_set_entry(i, s);
+			error = ext4_xattr_set_entry(i, s, inode);
 			if (!error) {
 				if (!IS_LAST_ENTRY(s->first))
 					ext4_xattr_rehash(header(s->base),
@@ -881,7 +887,7 @@
 		s->end = s->base + sb->s_blocksize;
 	}
 
-	error = ext4_xattr_set_entry(i, s);
+	error = ext4_xattr_set_entry(i, s, inode);
 	if (error == -EFSCORRUPTED)
 		goto bad_block;
 	if (error)
@@ -1079,7 +1085,7 @@
 
 	if (EXT4_I(inode)->i_extra_isize == 0)
 		return -ENOSPC;
-	error = ext4_xattr_set_entry(i, s);
+	error = ext4_xattr_set_entry(i, s, inode);
 	if (error) {
 		if (error == -ENOSPC &&
 		    ext4_has_inline_data(inode)) {
@@ -1091,7 +1097,7 @@
 			error = ext4_xattr_ibody_find(inode, i, is);
 			if (error)
 				return error;
-			error = ext4_xattr_set_entry(i, s);
+			error = ext4_xattr_set_entry(i, s, inode);
 		}
 		if (error)
 			return error;
@@ -1117,7 +1123,7 @@
 
 	if (EXT4_I(inode)->i_extra_isize == 0)
 		return -ENOSPC;
-	error = ext4_xattr_set_entry(i, s);
+	error = ext4_xattr_set_entry(i, s, inode);
 	if (error)
 		return error;
 	header = IHDR(inode, ext4_raw_inode(&is->iloc));
@@ -1426,6 +1432,11 @@
 		last = IFIRST(header);
 		/* Find the entry best suited to be pushed into EA block */
 		for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
+			/* never move system.data out of the inode */
+			if ((last->e_name_len == 4) &&
+			    (last->e_name_index == EXT4_XATTR_INDEX_SYSTEM) &&
+			    !memcmp(last->e_name, "data", 4))
+				continue;
 			total_size =
 			EXT4_XATTR_SIZE(le32_to_cpu(last->e_value_size)) +
 					EXT4_XATTR_LEN(last->e_name_len);
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index d7a53c6..6b24eb4 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -28,6 +28,7 @@
 
 void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io)
 {
+	f2fs_build_fault_attr(sbi, 0, 0);
 	set_ckpt_flags(sbi, CP_ERROR_FLAG);
 	if (!end_io)
 		f2fs_flush_merged_writes(sbi);
@@ -70,6 +71,7 @@
 		.encrypted_page = NULL,
 		.is_meta = is_meta,
 	};
+	int err;
 
 	if (unlikely(!is_meta))
 		fio.op_flags &= ~REQ_META;
@@ -84,9 +86,10 @@
 
 	fio.page = page;
 
-	if (f2fs_submit_page_bio(&fio)) {
+	err = f2fs_submit_page_bio(&fio);
+	if (err) {
 		f2fs_put_page(page, 1);
-		goto repeat;
+		return ERR_PTR(err);
 	}
 
 	lock_page(page);
@@ -95,14 +98,9 @@
 		goto repeat;
 	}
 
-	/*
-	 * if there is any IO error when accessing device, make our filesystem
-	 * readonly and make sure do not write checkpoint with non-uptodate
-	 * meta page.
-	 */
 	if (unlikely(!PageUptodate(page))) {
-		memset(page_address(page), 0, PAGE_SIZE);
-		f2fs_stop_checkpoint(sbi, false);
+		f2fs_put_page(page, 1);
+		return ERR_PTR(-EIO);
 	}
 out:
 	return page;
@@ -113,13 +111,32 @@
 	return __get_meta_page(sbi, index, true);
 }
 
+struct page *f2fs_get_meta_page_nofail(struct f2fs_sb_info *sbi, pgoff_t index)
+{
+	struct page *page;
+	int count = 0;
+
+retry:
+	page = __get_meta_page(sbi, index, true);
+	if (IS_ERR(page)) {
+		if (PTR_ERR(page) == -EIO &&
+				++count <= DEFAULT_RETRY_IO_COUNT)
+			goto retry;
+
+		f2fs_stop_checkpoint(sbi, false);
+		f2fs_bug_on(sbi, 1);
+	}
+
+	return page;
+}
+
 /* for POR only */
 struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index)
 {
 	return __get_meta_page(sbi, index, false);
 }
 
-bool f2fs_is_valid_meta_blkaddr(struct f2fs_sb_info *sbi,
+bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
 					block_t blkaddr, int type)
 {
 	switch (type) {
@@ -140,8 +157,20 @@
 			return false;
 		break;
 	case META_POR:
+	case DATA_GENERIC:
 		if (unlikely(blkaddr >= MAX_BLKADDR(sbi) ||
-			blkaddr < MAIN_BLKADDR(sbi)))
+			blkaddr < MAIN_BLKADDR(sbi))) {
+			if (type == DATA_GENERIC) {
+				f2fs_msg(sbi->sb, KERN_WARNING,
+					"access invalid blkaddr:%u", blkaddr);
+				WARN_ON(1);
+			}
+			return false;
+		}
+		break;
+	case META_GENERIC:
+		if (unlikely(blkaddr < SEG0_BLKADDR(sbi) ||
+			blkaddr >= MAIN_BLKADDR(sbi)))
 			return false;
 		break;
 	default:
@@ -176,7 +205,7 @@
 	blk_start_plug(&plug);
 	for (; nrpages-- > 0; blkno++) {
 
-		if (!f2fs_is_valid_meta_blkaddr(sbi, blkno, type))
+		if (!f2fs_is_valid_blkaddr(sbi, blkno, type))
 			goto out;
 
 		switch (type) {
@@ -242,11 +271,8 @@
 
 	trace_f2fs_writepage(page, META);
 
-	if (unlikely(f2fs_cp_error(sbi))) {
-		dec_page_count(sbi, F2FS_DIRTY_META);
-		unlock_page(page);
-		return 0;
-	}
+	if (unlikely(f2fs_cp_error(sbi)))
+		goto redirty_out;
 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
 		goto redirty_out;
 	if (wbc->for_reclaim && page->index < GET_SUM_BLOCK(sbi, 0))
@@ -529,13 +555,12 @@
 
 	spin_lock(&im->ino_lock);
 
-#ifdef CONFIG_F2FS_FAULT_INJECTION
 	if (time_to_inject(sbi, FAULT_ORPHAN)) {
 		spin_unlock(&im->ino_lock);
 		f2fs_show_injection_info(FAULT_ORPHAN);
 		return -ENOSPC;
 	}
-#endif
+
 	if (unlikely(im->ino_num >= sbi->max_orphans))
 		err = -ENOSPC;
 	else
@@ -572,12 +597,7 @@
 {
 	struct inode *inode;
 	struct node_info ni;
-	int err = f2fs_acquire_orphan_inode(sbi);
-
-	if (err)
-		goto err_out;
-
-	__add_ino_entry(sbi, ino, 0, ORPHAN_INO);
+	int err;
 
 	inode = f2fs_iget_retry(sbi->sb, ino);
 	if (IS_ERR(inode)) {
@@ -600,14 +620,15 @@
 	/* truncate all the data during iput */
 	iput(inode);
 
-	f2fs_get_node_info(sbi, ino, &ni);
+	err = f2fs_get_node_info(sbi, ino, &ni);
+	if (err)
+		goto err_out;
 
 	/* ENOMEM was fully retried in f2fs_evict_inode. */
 	if (ni.blk_addr != NULL_ADDR) {
 		err = -EIO;
 		goto err_out;
 	}
-	__remove_ino_entry(sbi, ino, ORPHAN_INO);
 	return 0;
 
 err_out:
@@ -639,7 +660,10 @@
 	/* Needed for iput() to work correctly and not trash data */
 	sbi->sb->s_flags |= MS_ACTIVE;
 
-	/* Turn on quotas so that they are updated correctly */
+	/*
+	 * Turn on quotas which were not enabled for read-only mounts if
+	 * filesystem has quota feature, so that they are updated correctly.
+	 */
 	quota_enabled = f2fs_enable_quota_files(sbi, s_flags & MS_RDONLY);
 #endif
 
@@ -649,9 +673,15 @@
 	f2fs_ra_meta_pages(sbi, start_blk, orphan_blocks, META_CP, true);
 
 	for (i = 0; i < orphan_blocks; i++) {
-		struct page *page = f2fs_get_meta_page(sbi, start_blk + i);
+		struct page *page;
 		struct f2fs_orphan_block *orphan_blk;
 
+		page = f2fs_get_meta_page(sbi, start_blk + i);
+		if (IS_ERR(page)) {
+			err = PTR_ERR(page);
+			goto out;
+		}
+
 		orphan_blk = (struct f2fs_orphan_block *)page_address(page);
 		for (j = 0; j < le32_to_cpu(orphan_blk->entry_count); j++) {
 			nid_t ino = le32_to_cpu(orphan_blk->ino[j]);
@@ -742,10 +772,14 @@
 	__u32 crc = 0;
 
 	*cp_page = f2fs_get_meta_page(sbi, cp_addr);
+	if (IS_ERR(*cp_page))
+		return PTR_ERR(*cp_page);
+
 	*cp_block = (struct f2fs_checkpoint *)page_address(*cp_page);
 
 	crc_offset = le32_to_cpu((*cp_block)->checksum_offset);
 	if (crc_offset > (blk_size - sizeof(__le32))) {
+		f2fs_put_page(*cp_page, 1);
 		f2fs_msg(sbi->sb, KERN_WARNING,
 			"invalid crc_offset: %zu", crc_offset);
 		return -EINVAL;
@@ -753,6 +787,7 @@
 
 	crc = cur_cp_crc(*cp_block);
 	if (!f2fs_crc_valid(sbi, crc, *cp_block, crc_offset)) {
+		f2fs_put_page(*cp_page, 1);
 		f2fs_msg(sbi->sb, KERN_WARNING, "invalid crc value");
 		return -EINVAL;
 	}
@@ -772,14 +807,22 @@
 	err = get_checkpoint_version(sbi, cp_addr, &cp_block,
 					&cp_page_1, version);
 	if (err)
-		goto invalid_cp1;
+		return NULL;
+
+	if (le32_to_cpu(cp_block->cp_pack_total_block_count) >
+					sbi->blocks_per_seg) {
+		f2fs_msg(sbi->sb, KERN_WARNING,
+			"invalid cp_pack_total_block_count:%u",
+			le32_to_cpu(cp_block->cp_pack_total_block_count));
+		goto invalid_cp;
+	}
 	pre_version = *version;
 
 	cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
 	err = get_checkpoint_version(sbi, cp_addr, &cp_block,
 					&cp_page_2, version);
 	if (err)
-		goto invalid_cp2;
+		goto invalid_cp;
 	cur_version = *version;
 
 	if (cur_version == pre_version) {
@@ -787,9 +830,8 @@
 		f2fs_put_page(cp_page_2, 1);
 		return cp_page_1;
 	}
-invalid_cp2:
 	f2fs_put_page(cp_page_2, 1);
-invalid_cp1:
+invalid_cp:
 	f2fs_put_page(cp_page_1, 1);
 	return NULL;
 }
@@ -838,15 +880,15 @@
 	cp_block = (struct f2fs_checkpoint *)page_address(cur_page);
 	memcpy(sbi->ckpt, cp_block, blk_size);
 
-	/* Sanity checking of checkpoint */
-	if (f2fs_sanity_check_ckpt(sbi))
-		goto free_fail_no_cp;
-
 	if (cur_page == cp1)
 		sbi->cur_cp_pack = 1;
 	else
 		sbi->cur_cp_pack = 2;
 
+	/* Sanity checking of checkpoint */
+	if (f2fs_sanity_check_ckpt(sbi))
+		goto free_fail_no_cp;
+
 	if (cp_blks <= 1)
 		goto done;
 
@@ -859,6 +901,8 @@
 		unsigned char *ckpt = (unsigned char *)sbi->ckpt;
 
 		cur_page = f2fs_get_meta_page(sbi, cp_blk_no + i);
+		if (IS_ERR(cur_page))
+			goto free_fail_no_cp;
 		sit_bitmap_ptr = page_address(cur_page);
 		memcpy(ckpt + i * blk_size, sit_bitmap_ptr, blk_size);
 		f2fs_put_page(cur_page, 1);
@@ -980,12 +1024,10 @@
 
 		iput(inode);
 		/* We need to give cpu to another writers. */
-		if (ino == cur_ino) {
-			congestion_wait(BLK_RW_ASYNC, HZ/50);
+		if (ino == cur_ino)
 			cond_resched();
-		} else {
+		else
 			ino = cur_ino;
-		}
 	} else {
 		/*
 		 * We should submit bio, since it exists several
@@ -1119,7 +1161,7 @@
 	f2fs_unlock_all(sbi);
 }
 
-static void wait_on_all_pages_writeback(struct f2fs_sb_info *sbi)
+void f2fs_wait_on_all_pages_writeback(struct f2fs_sb_info *sbi)
 {
 	DEFINE_WAIT(wait);
 
@@ -1129,6 +1171,9 @@
 		if (!get_pages(sbi, F2FS_WB_CP_DATA))
 			break;
 
+		if (unlikely(f2fs_cp_error(sbi)))
+			break;
+
 		io_schedule_timeout(5*HZ);
 	}
 	finish_wait(&sbi->cp_wait, &wait);
@@ -1202,8 +1247,12 @@
 
 	/* writeout cp pack 2 page */
 	err = __f2fs_write_meta_page(page, &wbc, FS_CP_META_IO);
-	f2fs_bug_on(sbi, err);
+	if (unlikely(err && f2fs_cp_error(sbi))) {
+		f2fs_put_page(page, 1);
+		return;
+	}
 
+	f2fs_bug_on(sbi, err);
 	f2fs_put_page(page, 0);
 
 	/* submit checkpoint (with barrier if NOBARRIER is not set) */
@@ -1229,7 +1278,7 @@
 	while (get_pages(sbi, F2FS_DIRTY_META)) {
 		f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
 		if (unlikely(f2fs_cp_error(sbi)))
-			return -EIO;
+			break;
 	}
 
 	/*
@@ -1309,7 +1358,7 @@
 			f2fs_sync_meta_pages(sbi, META, LONG_MAX,
 							FS_CP_META_IO);
 			if (unlikely(f2fs_cp_error(sbi)))
-				return -EIO;
+				break;
 		}
 	}
 
@@ -1348,10 +1397,7 @@
 	f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
 
 	/* wait for previous submitted meta pages writeback */
-	wait_on_all_pages_writeback(sbi);
-
-	if (unlikely(f2fs_cp_error(sbi)))
-		return -EIO;
+	f2fs_wait_on_all_pages_writeback(sbi);
 
 	/* flush all device cache */
 	err = f2fs_flush_device_cache(sbi);
@@ -1360,12 +1406,19 @@
 
 	/* barrier and flush checkpoint cp pack 2 page if it can */
 	commit_checkpoint(sbi, ckpt, start_blk);
-	wait_on_all_pages_writeback(sbi);
+	f2fs_wait_on_all_pages_writeback(sbi);
+
+	/*
+	 * invalidate intermediate page cache borrowed from meta inode
+	 * which are used for migration of encrypted inode's blocks.
+	 */
+	if (f2fs_sb_has_encrypt(sbi->sb))
+		invalidate_mapping_pages(META_MAPPING(sbi),
+				MAIN_BLKADDR(sbi), MAX_BLKADDR(sbi) - 1);
 
 	f2fs_release_ino_entry(sbi, false);
 
-	if (unlikely(f2fs_cp_error(sbi)))
-		return -EIO;
+	f2fs_reset_fsync_node_info(sbi);
 
 	clear_sbi_flag(sbi, SBI_IS_DIRTY);
 	clear_sbi_flag(sbi, SBI_NEED_CP);
@@ -1381,7 +1434,7 @@
 
 	f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_DENTS));
 
-	return 0;
+	return unlikely(f2fs_cp_error(sbi)) ? -EIO : 0;
 }
 
 /*
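The new f2fs_get_meta_page_nofail() above retries a failed meta-page read a bounded number of times before treating the error as fatal. The retry shape in a standalone sketch, with the failing operation simulated (the retry limit and helper names are illustrative):

#include <errno.h>
#include <stdio.h>

#define RETRY_LIMIT 8	/* bounded retries, value chosen for illustration */

/* Simulated operation that fails with -EIO a couple of times, then succeeds. */
static int flaky_read(int *attempts)
{
	return (++(*attempts) < 3) ? -EIO : 0;
}

static int read_with_retry(void)
{
	int attempts = 0, count = 0, err;

retry:
	err = flaky_read(&attempts);
	if (err == -EIO && ++count <= RETRY_LIMIT)
		goto retry;
	return err;
}

int main(void)
{
	printf("result: %d\n", read_with_retry());
	return 0;
}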
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index f7f2c13..6570c75 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -126,12 +126,10 @@
 
 static void f2fs_read_end_io(struct bio *bio)
 {
-#ifdef CONFIG_F2FS_FAULT_INJECTION
 	if (time_to_inject(F2FS_P_SB(bio->bi_io_vec->bv_page), FAULT_IO)) {
 		f2fs_show_injection_info(FAULT_IO);
 		bio->bi_error = -EIO;
 	}
-#endif
 
 	if (f2fs_bio_post_read_required(bio)) {
 		struct bio_post_read_ctx *ctx = bio->bi_private;
@@ -177,6 +175,8 @@
 					page->index != nid_of_node(page));
 
 		dec_page_count(sbi, type);
+		if (f2fs_in_warm_node_list(sbi, page))
+			f2fs_del_fsync_node_entry(sbi, page);
 		clear_cold_data(page);
 		end_page_writeback(page);
 	}
@@ -263,7 +263,7 @@
 		if (type != DATA && type != NODE)
 			goto submit_io;
 
-		if (f2fs_sb_has_blkzoned(sbi->sb) && current->plug)
+		if (test_opt(sbi, LFS) && current->plug)
 			blk_finish_plug(current->plug);
 
 		start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
@@ -441,7 +441,10 @@
 			fio->encrypted_page : fio->page;
 	struct inode *inode = fio->page->mapping->host;
 
-	verify_block_addr(fio, fio->new_blkaddr);
+	if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
+			__is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
+		return -EFAULT;
+
 	trace_f2fs_submit_page_bio(page, fio);
 	f2fs_trace_ios(fio, 0);
 
@@ -493,7 +496,7 @@
 		spin_unlock(&io->io_lock);
 	}
 
-	if (is_valid_blkaddr(fio->old_blkaddr))
+	if (__is_valid_data_blkaddr(fio->old_blkaddr))
 		verify_block_addr(fio, fio->old_blkaddr);
 	verify_block_addr(fio, fio->new_blkaddr);
 
@@ -554,19 +557,22 @@
 }
 
 static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
-							 unsigned nr_pages)
+					unsigned nr_pages, unsigned op_flag)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	struct bio *bio;
 	struct bio_post_read_ctx *ctx;
 	unsigned int post_read_steps = 0;
 
+	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC))
+		return ERR_PTR(-EFAULT);
+
 	bio = f2fs_bio_alloc(sbi, min_t(int, nr_pages, BIO_MAX_PAGES), false);
 	if (!bio)
 		return ERR_PTR(-ENOMEM);
 	f2fs_target_device(sbi, blkaddr, bio);
 	bio->bi_end_io = f2fs_read_end_io;
-	bio_set_op_attrs(bio, REQ_OP_READ, 0);
+	bio_set_op_attrs(bio, REQ_OP_READ, op_flag);
 
         if (f2fs_encrypted_file(inode) &&
             !fscrypt_using_hardware_encryption(inode))
@@ -592,7 +598,7 @@
 static int f2fs_submit_page_read(struct inode *inode, struct page *page,
 							block_t blkaddr)
 {
-	struct bio *bio = f2fs_grab_read_bio(inode, blkaddr, 1);
+	struct bio *bio = f2fs_grab_read_bio(inode, blkaddr, 1, 0);
 
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
@@ -893,6 +899,7 @@
 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
 	struct f2fs_summary sum;
 	struct node_info ni;
+	block_t old_blkaddr;
 	pgoff_t fofs;
 	blkcnt_t count = 1;
 	int err;
@@ -900,6 +907,10 @@
 	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
 		return -EPERM;
 
+	err = f2fs_get_node_info(sbi, dn->nid, &ni);
+	if (err)
+		return err;
+
 	dn->data_blkaddr = datablock_addr(dn->inode,
 				dn->node_page, dn->ofs_in_node);
 	if (dn->data_blkaddr == NEW_ADDR)
@@ -909,11 +920,13 @@
 		return err;
 
 alloc:
-	f2fs_get_node_info(sbi, dn->nid, &ni);
 	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
-
-	f2fs_allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
+	old_blkaddr = dn->data_blkaddr;
+	f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
 					&sum, seg_type, NULL, false);
+	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
+		invalidate_mapping_pages(META_MAPPING(sbi),
+					old_blkaddr, old_blkaddr);
 	f2fs_set_data_blkaddr(dn);
 
 	/* update i_size */
@@ -1069,7 +1082,13 @@
 next_block:
 	blkaddr = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);
 
-	if (!is_valid_blkaddr(blkaddr)) {
+	if (__is_valid_data_blkaddr(blkaddr) &&
+		!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC)) {
+		err = -EFAULT;
+		goto sync_out;
+	}
+
+	if (!is_valid_data_blkaddr(sbi, blkaddr)) {
 		if (create) {
 			if (unlikely(f2fs_cp_error(sbi))) {
 				err = -EIO;
@@ -1306,7 +1325,11 @@
 		if (!page)
 			return -ENOMEM;
 
-		f2fs_get_node_info(sbi, inode->i_ino, &ni);
+		err = f2fs_get_node_info(sbi, inode->i_ino, &ni);
+		if (err) {
+			f2fs_put_page(page, 1);
+			return err;
+		}
 
 		phys = (__u64)blk_to_logical(inode, ni.blk_addr);
 		offset = offsetof(struct f2fs_inode, i_addr) +
@@ -1333,7 +1356,11 @@
 		if (!page)
 			return -ENOMEM;
 
-		f2fs_get_node_info(sbi, xnid, &ni);
+		err = f2fs_get_node_info(sbi, xnid, &ni);
+		if (err) {
+			f2fs_put_page(page, 1);
+			return err;
+		}
 
 		phys = (__u64)blk_to_logical(inode, ni.blk_addr);
 		len = inode->i_sb->s_blocksize;
@@ -1445,10 +1472,15 @@
 /*
  * This function was originally taken from fs/mpage.c, and customized for f2fs.
  * Major change was from block_size == page_size in f2fs by default.
+ *
+ * Note that the aops->readpages() function is ONLY used for read-ahead. If
+ * this function ever deviates from doing just read-ahead, it should either
+ * use ->readpage() or do the necessary surgery to decouple ->readpages()
+ * from read-ahead.
  */
 static int f2fs_mpage_readpages(struct address_space *mapping,
 			struct list_head *pages, struct page *page,
-			unsigned nr_pages)
+			unsigned nr_pages, bool is_readahead)
 {
 	struct bio *bio = NULL;
 	sector_t last_block_in_bio = 0;
@@ -1521,6 +1553,10 @@
 				SetPageUptodate(page);
 				goto confused;
 			}
+
+			if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
+								DATA_GENERIC))
+				goto set_error_page;
 		} else {
 			zero_user_segment(page, 0, PAGE_SIZE);
 			if (!PageUptodate(page))
@@ -1548,7 +1584,8 @@
 		}
 
 		if (bio == NULL) {
-			bio = f2fs_grab_read_bio(inode, block_nr, nr_pages);
+			bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
+					is_readahead ? REQ_RAHEAD : 0);
 			if (IS_ERR(bio)) {
 				bio = NULL;
 				goto set_error_page;
@@ -1593,7 +1630,7 @@
 	if (f2fs_has_inline_data(inode))
 		ret = f2fs_read_inline_data(inode, page);
 	if (ret == -EAGAIN)
-		ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
+		ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1, false);
 	return ret;
 }
 
@@ -1610,12 +1647,13 @@
 	if (f2fs_has_inline_data(inode))
 		return 0;
 
-	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
+	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages, true);
 }
 
 static int encrypt_one_page(struct f2fs_io_info *fio)
 {
 	struct inode *inode = fio->page->mapping->host;
+	struct page *mpage;
 	gfp_t gfp_flags = GFP_NOFS;
 
 	if (!f2fs_encrypted_file(inode))
@@ -1630,17 +1668,25 @@
 
 	fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
 			PAGE_SIZE, 0, fio->page->index, gfp_flags);
-	if (!IS_ERR(fio->encrypted_page))
-		return 0;
-
-	/* flush pending IOs and wait for a while in the ENOMEM case */
-	if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
-		f2fs_flush_merged_writes(fio->sbi);
-		congestion_wait(BLK_RW_ASYNC, HZ/50);
-		gfp_flags |= __GFP_NOFAIL;
-		goto retry_encrypt;
+	if (IS_ERR(fio->encrypted_page)) {
+		/* flush pending IOs and wait for a while in the ENOMEM case */
+		if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
+			f2fs_flush_merged_writes(fio->sbi);
+			congestion_wait(BLK_RW_ASYNC, HZ/50);
+			gfp_flags |= __GFP_NOFAIL;
+			goto retry_encrypt;
+		}
+		return PTR_ERR(fio->encrypted_page);
 	}
-	return PTR_ERR(fio->encrypted_page);
+
+	mpage = find_lock_page(META_MAPPING(fio->sbi), fio->old_blkaddr);
+	if (mpage) {
+		if (PageUptodate(mpage))
+			memcpy(page_address(mpage),
+				page_address(fio->encrypted_page), PAGE_SIZE);
+		f2fs_put_page(mpage, 1);
+	}
+	return 0;
 }
 
 static inline bool check_inplace_update_policy(struct inode *inode,
@@ -1724,6 +1770,7 @@
 	struct inode *inode = page->mapping->host;
 	struct dnode_of_data dn;
 	struct extent_info ei = {0,0,0};
+	struct node_info ni;
 	bool ipu_force = false;
 	int err = 0;
 
@@ -1732,11 +1779,13 @@
 			f2fs_lookup_extent_cache(inode, page->index, &ei)) {
 		fio->old_blkaddr = ei.blk + page->index - ei.fofs;
 
-		if (is_valid_blkaddr(fio->old_blkaddr)) {
-			ipu_force = true;
-			fio->need_lock = LOCK_DONE;
-			goto got_it;
-		}
+		if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
+							DATA_GENERIC))
+			return -EFAULT;
+
+		ipu_force = true;
+		fio->need_lock = LOCK_DONE;
+		goto got_it;
 	}
 
 	/* Deadlock due to between page->lock and f2fs_lock_op */
@@ -1755,11 +1804,17 @@
 		goto out_writepage;
 	}
 got_it:
+	if (__is_valid_data_blkaddr(fio->old_blkaddr) &&
+		!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
+							DATA_GENERIC)) {
+		err = -EFAULT;
+		goto out_writepage;
+	}
 	/*
 	 * If current allocation needs SSR,
 	 * it had better in-place writes for updated data.
 	 */
-	if (ipu_force || (is_valid_blkaddr(fio->old_blkaddr) &&
+	if (ipu_force || (is_valid_data_blkaddr(fio->sbi, fio->old_blkaddr) &&
 					need_inplace_update(fio))) {
 		err = encrypt_one_page(fio);
 		if (err)
@@ -1784,6 +1839,12 @@
 		fio->need_lock = LOCK_REQ;
 	}
 
+	err = f2fs_get_node_info(fio->sbi, dn.nid, &ni);
+	if (err)
+		goto out_writepage;
+
+	fio->version = ni.version;
+
 	err = encrypt_one_page(fio);
 	if (err)
 		goto out_writepage;
@@ -2207,10 +2268,14 @@
 	loff_t i_size = i_size_read(inode);
 
 	if (to > i_size) {
+		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 		down_write(&F2FS_I(inode)->i_mmap_sem);
+
 		truncate_pagecache(inode, i_size);
 		f2fs_truncate_blocks(inode, i_size, true);
+
 		up_write(&F2FS_I(inode)->i_mmap_sem);
+		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 	}
 }
 
@@ -2315,8 +2380,9 @@
 	}
 	trace_f2fs_write_begin(inode, pos, len, flags);
 
-	if (f2fs_is_atomic_file(inode) &&
-			!f2fs_available_free_memory(sbi, INMEM_PAGES)) {
+	if ((f2fs_is_atomic_file(inode) &&
+			!f2fs_available_free_memory(sbi, INMEM_PAGES)) ||
+			is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
 		err = -ENOMEM;
 		drop_atomic = true;
 		goto fail;
@@ -2441,14 +2507,20 @@
 static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
 			   loff_t offset)
 {
-	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;
+	unsigned i_blkbits = READ_ONCE(inode->i_blkbits);
+	unsigned blkbits = i_blkbits;
+	unsigned blocksize_mask = (1 << blkbits) - 1;
+	unsigned long align = offset | iov_iter_alignment(iter);
+	struct block_device *bdev = inode->i_sb->s_bdev;
 
-	if (offset & blocksize_mask)
-		return -EINVAL;
-
-	if (iov_iter_alignment(iter) & blocksize_mask)
-		return -EINVAL;
-
+	if (align & blocksize_mask) {
+		if (bdev)
+			blkbits = blksize_bits(bdev_logical_block_size(bdev));
+		blocksize_mask = (1 << blkbits) - 1;
+		if (align & blocksize_mask)
+			return -EINVAL;
+		return 1;
+	}
 	return 0;
 }
 
@@ -2466,7 +2538,7 @@
 
 	err = check_direct_IO(inode, iter, offset);
 	if (err)
-		return err;
+		return err < 0 ? err : 0;
 
 	if (f2fs_force_buffered_io(inode, rw))
 		return 0;
@@ -2588,6 +2660,10 @@
 	if (!PageUptodate(page))
 		SetPageUptodate(page);
 
+	/* don't remain PG_checked flag which was set during GC */
+	if (is_cold_data(page))
+		clear_cold_data(page);
+
 	if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
 		if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
 			f2fs_register_inmem_page(inode, page);
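check_direct_IO() now folds the offset and the iterator alignment into a single mask test and falls back to the block device's logical block size before rejecting the request. A userspace sketch of the same mask arithmetic, with the return convention mirroring the hunk (0 = aligned, 1 = only device-block aligned, negative = reject); the block sizes below are arbitrary examples:

#include <stdio.h>

static int check_alignment(unsigned long offset, unsigned long iter_align,
			   unsigned int fs_blocksize, unsigned int dev_blocksize)
{
	unsigned long align = offset | iter_align;

	if (!(align & (fs_blocksize - 1)))
		return 0;	/* aligned to the filesystem block size */
	if (!(align & (dev_blocksize - 1)))
		return 1;	/* only aligned to the device's logical block size */
	return -1;		/* not usable for direct I/O */
}

int main(void)
{
	printf("%d\n", check_alignment(8192, 4096, 4096, 512));	/* 0 */
	printf("%d\n", check_alignment(1536, 512, 4096, 512));	/* 1 */
	printf("%d\n", check_alignment(100, 512, 4096, 512));	/* -1 */
	return 0;
}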
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index 2d65e77..214a968 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -215,7 +215,8 @@
 	si->base_mem += sizeof(struct f2fs_nm_info);
 	si->base_mem += __bitmap_size(sbi, NAT_BITMAP);
 	si->base_mem += (NM_I(sbi)->nat_bits_blocks << F2FS_BLKSIZE_BITS);
-	si->base_mem += NM_I(sbi)->nat_blocks * NAT_ENTRY_BITMAP_SIZE;
+	si->base_mem += NM_I(sbi)->nat_blocks *
+				f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK);
 	si->base_mem += NM_I(sbi)->nat_blocks / 8;
 	si->base_mem += NM_I(sbi)->nat_blocks * sizeof(unsigned short);
 
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 63c0732..56cc274 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -517,12 +517,11 @@
 	}
 
 start:
-#ifdef CONFIG_F2FS_FAULT_INJECTION
 	if (time_to_inject(F2FS_I_SB(dir), FAULT_DIR_DEPTH)) {
 		f2fs_show_injection_info(FAULT_DIR_DEPTH);
 		return -ENOSPC;
 	}
-#endif
+
 	if (unlikely(current_depth == MAX_DIR_HASH_DEPTH))
 		return -ENOSPC;
 
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index b696bc8..5bcbdce 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -43,7 +43,6 @@
 	} while (0)
 #endif
 
-#ifdef CONFIG_F2FS_FAULT_INJECTION
 enum {
 	FAULT_KMALLOC,
 	FAULT_KVMALLOC,
@@ -58,16 +57,20 @@
 	FAULT_TRUNCATE,
 	FAULT_IO,
 	FAULT_CHECKPOINT,
+	FAULT_DISCARD,
 	FAULT_MAX,
 };
 
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+#define F2FS_ALL_FAULT_TYPE		((1 << FAULT_MAX) - 1)
+
 struct f2fs_fault_info {
 	atomic_t inject_ops;
 	unsigned int inject_rate;
 	unsigned int inject_type;
 };
 
-extern char *fault_name[FAULT_MAX];
+extern char *f2fs_fault_name[FAULT_MAX];
 #define IS_FAULT_SET(fi, type) ((fi)->inject_type & (1 << (type)))
 #endif
 
@@ -180,7 +183,6 @@
 
 #define MAX_DISCARD_BLOCKS(sbi)		BLKS_PER_SEC(sbi)
 #define DEF_MAX_DISCARD_REQUEST		8	/* issue 8 discards per round */
-#define DEF_MAX_DISCARD_LEN		512	/* Max. 2MB per discard */
 #define DEF_MIN_DISCARD_ISSUE_TIME	50	/* 50 ms, if exists */
 #define DEF_MID_DISCARD_ISSUE_TIME	500	/* 500 ms, if device busy */
 #define DEF_MAX_DISCARD_ISSUE_TIME	60000	/* 60 s, if no candidates */
@@ -196,7 +198,7 @@
 };
 
 /*
- * For CP/NAT/SIT/SSA readahead
+ * indicate meta/data type
  */
 enum {
 	META_CP,
@@ -204,6 +206,8 @@
 	META_SIT,
 	META_SSA,
 	META_POR,
+	DATA_GENERIC,
+	META_GENERIC,
 };
 
 /* for the list of ino */
@@ -228,6 +232,12 @@
 	struct inode *inode;	/* vfs inode pointer */
 };
 
+struct fsync_node_entry {
+	struct list_head list;	/* list head */
+	struct page *page;	/* warm node page pointer */
+	unsigned int seq_id;	/* sequence id */
+};
+
 /* for the bitmap indicate blocks to be discarded */
 struct discard_entry {
 	struct list_head list;	/* list head */
@@ -244,9 +254,10 @@
 					(MAX_PLIST_NUM - 1) : (blk_num - 1))
 
 enum {
-	D_PREP,
-	D_SUBMIT,
-	D_DONE,
+	D_PREP,			/* initial */
+	D_PARTIAL,		/* partially submitted */
+	D_SUBMIT,		/* all submitted */
+	D_DONE,			/* finished */
 };
 
 struct discard_info {
@@ -271,7 +282,10 @@
 	struct block_device *bdev;	/* bdev */
 	unsigned short ref;		/* reference count */
 	unsigned char state;		/* state */
+	unsigned char issuing;		/* issuing discard */
 	int error;			/* bio error */
+	spinlock_t lock;		/* for state/bio_ref updating */
+	unsigned short bio_ref;		/* bio reference count */
 };
 
 enum {
@@ -291,6 +305,7 @@
 	unsigned int io_aware_gran;	/* minimum granularity discard not be aware of I/O */
 	bool io_aware;			/* issue discard in idle time */
 	bool sync;			/* submit discard with REQ_SYNC flag */
+	bool ordered;			/* issue discard by lba order */
 	unsigned int granularity;	/* discard granularity */
 };
 
@@ -307,10 +322,12 @@
 	unsigned int max_discards;		/* max. discards to be issued */
 	unsigned int discard_granularity;	/* discard granularity */
 	unsigned int undiscard_blks;		/* # of undiscard blocks */
+	unsigned int next_pos;			/* next discard position */
 	atomic_t issued_discard;		/* # of issued discard */
 	atomic_t issing_discard;		/* # of issing discard */
 	atomic_t discard_cmd_cnt;		/* # of cached cmd count */
 	struct rb_root root;			/* root of discard rb-tree */
+	bool rbtree_check;			/* config for consistency check */
 };
 
 /* for the list of fsync inodes, used only during recovery */
@@ -507,13 +524,12 @@
 					 */
 };
 
+#define DEFAULT_RETRY_IO_COUNT	8	/* maximum retry read IO count */
+
 #define F2FS_LINK_MAX	0xffffffff	/* maximum link count per file */
 
 #define MAX_DIR_RA_PAGES	4	/* maximum ra pages of dir */
 
-/* vector size for gang look-up from extent cache that consists of radix tree */
-#define EXT_TREE_VEC_SIZE	64
-
 /* for in-memory extent cache entry */
 #define F2FS_MIN_EXTENT_LEN	64	/* minimum extent length */
 
@@ -599,6 +615,8 @@
 #define FADVISE_HOT_BIT		0x20
 #define FADVISE_VERITY_BIT	0x40	/* reserved */
 
+#define FADVISE_MODIFIABLE_BITS	(FADVISE_COLD_BIT | FADVISE_HOT_BIT)
+
 #define file_is_cold(inode)	is_file(inode, FADVISE_COLD_BIT)
 #define file_wrong_pino(inode)	is_file(inode, FADVISE_LOST_PINO_BIT)
 #define file_set_cold(inode)	set_file(inode, FADVISE_COLD_BIT)
@@ -697,22 +715,22 @@
 }
 
 static inline bool __is_discard_mergeable(struct discard_info *back,
-						struct discard_info *front)
+			struct discard_info *front, unsigned int max_len)
 {
 	return (back->lstart + back->len == front->lstart) &&
-		(back->len + front->len < DEF_MAX_DISCARD_LEN);
+		(back->len + front->len <= max_len);
 }
 
 static inline bool __is_discard_back_mergeable(struct discard_info *cur,
-						struct discard_info *back)
+			struct discard_info *back, unsigned int max_len)
 {
-	return __is_discard_mergeable(back, cur);
+	return __is_discard_mergeable(back, cur, max_len);
 }
 
 static inline bool __is_discard_front_mergeable(struct discard_info *cur,
-						struct discard_info *front)
+			struct discard_info *front, unsigned int max_len)
 {
-	return __is_discard_mergeable(cur, front);
+	return __is_discard_mergeable(cur, front, max_len);
 }
 
 static inline bool __is_extent_mergeable(struct extent_info *back,
@@ -767,6 +785,7 @@
 	struct radix_tree_root nat_set_root;/* root of the nat set cache */
 	struct rw_semaphore nat_tree_lock;	/* protect nat_tree_lock */
 	struct list_head nat_entries;	/* cached nat entry list (clean) */
+	spinlock_t nat_list_lock;	/* protect clean nat entry list */
 	unsigned int nat_cnt;		/* the # of cached nat entries */
 	unsigned int dirty_nat_cnt;	/* total num of nat entries in set */
 	unsigned int nat_blocks;	/* # of nat blocks */
@@ -1015,6 +1034,7 @@
 	bool retry;		/* need to reallocate block address */
 	enum iostat_type io_type;	/* io type */
 	struct writeback_control *io_wbc; /* writeback control */
+	unsigned char version;		/* version of the node */
 };
 
 #define is_read_io(rw) ((rw) == READ)
@@ -1066,6 +1086,7 @@
 	SBI_POR_DOING,				/* recovery is doing or not */
 	SBI_NEED_SB_WRITE,			/* need to recover superblock */
 	SBI_NEED_CP,				/* need to checkpoint */
+	SBI_IS_SHUTDOWN,			/* shutdown by ioctl */
 };
 
 enum {
@@ -1149,6 +1170,11 @@
 
 	struct inode_management im[MAX_INO_ENTRY];      /* manage inode cache */
 
+	spinlock_t fsync_node_lock;		/* for node entry lock */
+	struct list_head fsync_node_list;	/* node list head */
+	unsigned int fsync_seg_id;		/* sequence id */
+	unsigned int fsync_node_num;		/* number of node entries */
+
 	/* for orphan inode, use 0'th array */
 	unsigned int max_orphans;		/* max orphan inodes */
 
@@ -1216,6 +1242,7 @@
 	unsigned int gc_mode;			/* current GC state */
 	/* for skip statistic */
 	unsigned long long skipped_atomic_files[2];	/* FG_GC and BG_GC */
+	unsigned long long skipped_gc_rwsem;		/* FG_GC only */
 
 	/* threshold for gc trials on pinned files */
 	u64 gc_pin_file_threshold;
@@ -1280,7 +1307,7 @@
 #ifdef CONFIG_F2FS_FAULT_INJECTION
 #define f2fs_show_injection_info(type)				\
 	printk("%sF2FS-fs : inject %s in %s of %pF\n",		\
-		KERN_INFO, fault_name[type],			\
+		KERN_INFO, f2fs_fault_name[type],		\
 		__func__, __builtin_return_address(0))
 static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
 {
@@ -1299,6 +1326,12 @@
 	}
 	return false;
 }
+#else
+#define f2fs_show_injection_info(type) do { } while (0)
+static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
+{
+	return false;
+}
 #endif
 
 /* For write statistics. Suppose sector size is 512 bytes,
@@ -1327,7 +1360,7 @@
 	struct request_list *rl = &q->root_rl;
 
 	if (rl->count[BLK_RW_SYNC] || rl->count[BLK_RW_ASYNC])
-		return 0;
+		return false;
 
 	return f2fs_time_over(sbi, REQ_TIME);
 }
@@ -1651,13 +1684,12 @@
 	if (ret)
 		return ret;
 
-#ifdef CONFIG_F2FS_FAULT_INJECTION
 	if (time_to_inject(sbi, FAULT_BLOCK)) {
 		f2fs_show_injection_info(FAULT_BLOCK);
 		release = *count;
 		goto enospc;
 	}
-#endif
+
 	/*
 	 * let's increase this in prior to actual block count change in order
 	 * for f2fs_sync_file to avoid data races when deciding checkpoint.
@@ -1681,18 +1713,20 @@
 		sbi->total_valid_block_count -= diff;
 		if (!*count) {
 			spin_unlock(&sbi->stat_lock);
-			percpu_counter_sub(&sbi->alloc_valid_block_count, diff);
 			goto enospc;
 		}
 	}
 	spin_unlock(&sbi->stat_lock);
 
-	if (unlikely(release))
+	if (unlikely(release)) {
+		percpu_counter_sub(&sbi->alloc_valid_block_count, release);
 		dquot_release_reservation_block(inode, release);
+	}
 	f2fs_i_blocks_write(inode, *count, true, true);
 	return 0;
 
 enospc:
+	percpu_counter_sub(&sbi->alloc_valid_block_count, release);
 	dquot_release_reservation_block(inode, release);
 	return -ENOSPC;
 }
@@ -1864,12 +1898,10 @@
 			return ret;
 	}
 
-#ifdef CONFIG_F2FS_FAULT_INJECTION
 	if (time_to_inject(sbi, FAULT_BLOCK)) {
 		f2fs_show_injection_info(FAULT_BLOCK);
 		goto enospc;
 	}
-#endif
 
 	spin_lock(&sbi->stat_lock);
 
@@ -1954,17 +1986,23 @@
 static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
 						pgoff_t index, bool for_write)
 {
-#ifdef CONFIG_F2FS_FAULT_INJECTION
-	struct page *page = find_lock_page(mapping, index);
+	struct page *page;
 
-	if (page)
-		return page;
+	if (IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION)) {
+		if (!for_write)
+			page = find_get_page_flags(mapping, index,
+							FGP_LOCK | FGP_ACCESSED);
+		else
+			page = find_lock_page(mapping, index);
+		if (page)
+			return page;
 
-	if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC)) {
-		f2fs_show_injection_info(FAULT_PAGE_ALLOC);
-		return NULL;
+		if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC)) {
+			f2fs_show_injection_info(FAULT_PAGE_ALLOC);
+			return NULL;
+		}
 	}
-#endif
+
 	if (!for_write)
 		return grab_cache_page(mapping, index);
 	return grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
@@ -1974,12 +2012,11 @@
 				struct address_space *mapping, pgoff_t index,
 				int fgp_flags, gfp_t gfp_mask)
 {
-#ifdef CONFIG_F2FS_FAULT_INJECTION
 	if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET)) {
 		f2fs_show_injection_info(FAULT_PAGE_GET);
 		return NULL;
 	}
-#endif
+
 	return pagecache_get_page(mapping, index, fgp_flags, gfp_mask);
 }
 
@@ -2044,12 +2081,11 @@
 			bio = bio_alloc(GFP_NOIO | __GFP_NOFAIL, npages);
 		return bio;
 	}
-#ifdef CONFIG_F2FS_FAULT_INJECTION
 	if (time_to_inject(sbi, FAULT_ALLOC_BIO)) {
 		f2fs_show_injection_info(FAULT_ALLOC_BIO);
 		return NULL;
 	}
-#endif
+
 	return bio_alloc(GFP_KERNEL, npages);
 }
 
@@ -2584,12 +2620,11 @@
 static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
 					size_t size, gfp_t flags)
 {
-#ifdef CONFIG_F2FS_FAULT_INJECTION
 	if (time_to_inject(sbi, FAULT_KMALLOC)) {
 		f2fs_show_injection_info(FAULT_KMALLOC);
 		return NULL;
 	}
-#endif
+
 	return kmalloc(size, flags);
 }
 
@@ -2632,12 +2667,11 @@
 static inline void *f2fs_kvmalloc(struct f2fs_sb_info *sbi,
 					size_t size, gfp_t flags)
 {
-#ifdef CONFIG_F2FS_FAULT_INJECTION
 	if (time_to_inject(sbi, FAULT_KVMALLOC)) {
 		f2fs_show_injection_info(FAULT_KVMALLOC);
 		return NULL;
 	}
-#endif
+
 	return kvmalloc(size, flags);
 }
 
@@ -2696,13 +2730,39 @@
 	spin_unlock(&sbi->iostat_lock);
 }
 
-static inline bool is_valid_blkaddr(block_t blkaddr)
+#define __is_meta_io(fio) (PAGE_TYPE_OF_BIO(fio->type) == META &&	\
+				(!is_read_io(fio->op) || fio->is_meta))
+
+bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
+					block_t blkaddr, int type);
+void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...);
+static inline void verify_blkaddr(struct f2fs_sb_info *sbi,
+					block_t blkaddr, int type)
+{
+	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type)) {
+		f2fs_msg(sbi->sb, KERN_ERR,
+			"invalid blkaddr: %u, type: %d, run fsck to fix.",
+			blkaddr, type);
+		f2fs_bug_on(sbi, 1);
+	}
+}
+
+static inline bool __is_valid_data_blkaddr(block_t blkaddr)
 {
 	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
 		return false;
 	return true;
 }
 
+static inline bool is_valid_data_blkaddr(struct f2fs_sb_info *sbi,
+						block_t blkaddr)
+{
+	if (!__is_valid_data_blkaddr(blkaddr))
+		return false;
+	verify_blkaddr(sbi, blkaddr, DATA_GENERIC);
+	return true;
+}
+
 /*
  * file.c
  */
@@ -2817,16 +2877,21 @@
 
 int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid);
 bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type);
+bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page);
+void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi);
+void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page);
+void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi);
 int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid);
 bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid);
 bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino);
-void f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
+int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
 						struct node_info *ni);
 pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs);
 int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode);
 int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from);
 int f2fs_truncate_xattr_node(struct inode *inode);
-int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino);
+int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
+					unsigned int seq_id);
 int f2fs_remove_inode_page(struct inode *inode);
 struct page *f2fs_new_inode_page(struct inode *inode);
 struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs);
@@ -2835,11 +2900,12 @@
 struct page *f2fs_get_node_page_ra(struct page *parent, int start);
 void f2fs_move_node_page(struct page *node_page, int gc_type);
 int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
-			struct writeback_control *wbc, bool atomic);
+			struct writeback_control *wbc, bool atomic,
+			unsigned int *seq_id);
 int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
 			struct writeback_control *wbc,
 			bool do_balance, enum iostat_type io_type);
-void f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount);
+int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount);
 bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid);
 void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid);
 void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid);
@@ -2847,7 +2913,7 @@
 void f2fs_recover_inline_xattr(struct inode *inode, struct page *page);
 int f2fs_recover_xattr_data(struct inode *inode, struct page *page);
 int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page);
-void f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
+int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
 			unsigned int segno, struct f2fs_summary_block *sum);
 void f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
 int f2fs_build_node_manager(struct f2fs_sb_info *sbi);
@@ -2925,9 +2991,10 @@
 void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io);
 struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
 struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
+struct page *f2fs_get_meta_page_nofail(struct f2fs_sb_info *sbi, pgoff_t index);
 struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index);
-bool f2fs_is_valid_meta_blkaddr(struct f2fs_sb_info *sbi,
-			block_t blkaddr, int type);
+bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
+					block_t blkaddr, int type);
 int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
 			int type, bool sync);
 void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index);
@@ -2951,6 +3018,7 @@
 void f2fs_update_dirty_page(struct inode *inode, struct page *page);
 void f2fs_remove_dirty_inode(struct inode *inode);
 int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type);
+void f2fs_wait_on_all_pages_writeback(struct f2fs_sb_info *sbi);
 int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc);
 void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi);
 int __init f2fs_create_checkpoint_caches(void);
@@ -3389,7 +3457,7 @@
 
 	return (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode));
 #else
-	return 0;
+	return false;
 #endif
 }
 
@@ -3411,4 +3479,11 @@
 			fscrypt_using_hardware_encryption(inode));
 }
 
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
+							unsigned int type);
+#else
+#define f2fs_build_fault_attr(sbi, rate, type)		do { } while (0)
+#endif
+
 #endif
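The header changes above compile time_to_inject() unconditionally and stub it out when fault injection is disabled, so callers lose their #ifdef blocks and the compiler simply drops the dead branch. A compact sketch of that pattern (the config macro name is made up):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for a Kconfig switch; flip to 1 to enable the debug path. */
#define CONFIG_DEMO_FAULT_INJECTION 0

#if CONFIG_DEMO_FAULT_INJECTION
static bool time_to_inject(void)
{
	return true;		/* a real implementation would rate-limit here */
}
#else
/* Stub keeps callers free of #ifdefs; the dead branch is optimized away. */
static bool time_to_inject(void)
{
	return false;
}
#endif

int main(void)
{
	if (time_to_inject())
		puts("injecting fault");
	else
		puts("normal path");
	return 0;
}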
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 18f415a..4636b01 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -215,6 +215,7 @@
 		.nr_to_write = LONG_MAX,
 		.for_reclaim = 0,
 	};
+	unsigned int seq_id = 0;
 
 	if (unlikely(f2fs_readonly(inode->i_sb)))
 		return 0;
@@ -277,7 +278,7 @@
 	}
 sync_nodes:
 	atomic_inc(&sbi->wb_sync_req[NODE]);
-	ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic);
+	ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
 	atomic_dec(&sbi->wb_sync_req[NODE]);
 	if (ret)
 		goto out;
@@ -303,7 +304,7 @@
 	 * given fsync mark.
 	 */
 	if (!atomic) {
-		ret = f2fs_wait_on_node_pages_writeback(sbi, ino);
+		ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
 		if (ret)
 			goto out;
 	}
@@ -352,13 +353,13 @@
 	return pgofs;
 }
 
-static bool __found_offset(block_t blkaddr, pgoff_t dirty, pgoff_t pgofs,
-							int whence)
+static bool __found_offset(struct f2fs_sb_info *sbi, block_t blkaddr,
+				pgoff_t dirty, pgoff_t pgofs, int whence)
 {
 	switch (whence) {
 	case SEEK_DATA:
 		if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
-			is_valid_blkaddr(blkaddr))
+			is_valid_data_blkaddr(sbi, blkaddr))
 			return true;
 		break;
 	case SEEK_HOLE:
@@ -422,7 +423,15 @@
 			blkaddr = datablock_addr(dn.inode,
 					dn.node_page, dn.ofs_in_node);
 
-			if (__found_offset(blkaddr, dirty, pgofs, whence)) {
+			if (__is_valid_data_blkaddr(blkaddr) &&
+				!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
+						blkaddr, DATA_GENERIC)) {
+				f2fs_put_dnode(&dn);
+				goto fail;
+			}
+
+			if (__found_offset(F2FS_I_SB(inode), blkaddr, dirty,
+							pgofs, whence)) {
 				f2fs_put_dnode(&dn);
 				goto found;
 			}
@@ -515,6 +524,11 @@
 
 		dn->data_blkaddr = NULL_ADDR;
 		f2fs_set_data_blkaddr(dn);
+
+		if (__is_valid_data_blkaddr(blkaddr) &&
+			!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC))
+			continue;
+
 		f2fs_invalidate_blocks(sbi, blkaddr);
 		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
 			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
@@ -656,12 +670,11 @@
 
 	trace_f2fs_truncate(inode);
 
-#ifdef CONFIG_F2FS_FAULT_INJECTION
 	if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
 		f2fs_show_injection_info(FAULT_TRUNCATE);
 		return -EIO;
 	}
-#endif
+
 	/* we should check inline_data size */
 	if (!f2fs_may_inline_data(inode)) {
 		err = f2fs_convert_inline_inode(inode);
@@ -785,22 +798,26 @@
 	}
 
 	if (attr->ia_valid & ATTR_SIZE) {
-		if (attr->ia_size <= i_size_read(inode)) {
-			down_write(&F2FS_I(inode)->i_mmap_sem);
-			truncate_setsize(inode, attr->ia_size);
-			err = f2fs_truncate(inode);
-			up_write(&F2FS_I(inode)->i_mmap_sem);
-			if (err)
-				return err;
-		} else {
-			/*
-			 * do not trim all blocks after i_size if target size is
-			 * larger than i_size.
-			 */
-			down_write(&F2FS_I(inode)->i_mmap_sem);
-			truncate_setsize(inode, attr->ia_size);
-			up_write(&F2FS_I(inode)->i_mmap_sem);
+		bool to_smaller = (attr->ia_size <= i_size_read(inode));
 
+		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+		down_write(&F2FS_I(inode)->i_mmap_sem);
+
+		truncate_setsize(inode, attr->ia_size);
+
+		if (to_smaller)
+			err = f2fs_truncate(inode);
+		/*
+		 * do not trim all blocks after i_size if target size is
+		 * larger than i_size.
+		 */
+		up_write(&F2FS_I(inode)->i_mmap_sem);
+		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+
+		if (err)
+			return err;
+
+		if (!to_smaller) {
 			/* should convert inline inode here */
 			if (!f2fs_may_inline_data(inode)) {
 				err = f2fs_convert_inline_inode(inode);
@@ -947,14 +964,19 @@
 
 			blk_start = (loff_t)pg_start << PAGE_SHIFT;
 			blk_end = (loff_t)pg_end << PAGE_SHIFT;
+
+			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 			down_write(&F2FS_I(inode)->i_mmap_sem);
+
 			truncate_inode_pages_range(mapping, blk_start,
 					blk_end - 1);
 
 			f2fs_lock_op(sbi);
 			ret = f2fs_truncate_hole(inode, pg_start, pg_end);
 			f2fs_unlock_op(sbi);
+
 			up_write(&F2FS_I(inode)->i_mmap_sem);
+			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 		}
 	}
 
@@ -1057,7 +1079,12 @@
 			if (ret)
 				return ret;
 
-			f2fs_get_node_info(sbi, dn.nid, &ni);
+			ret = f2fs_get_node_info(sbi, dn.nid, &ni);
+			if (ret) {
+				f2fs_put_dnode(&dn);
+				return ret;
+			}
+
 			ilen = min((pgoff_t)
 				ADDRS_PER_PAGE(dn.node_page, dst_inode) -
 						dn.ofs_in_node, len - i);
@@ -1164,25 +1191,33 @@
 	return ret;
 }
 
-static int f2fs_do_collapse(struct inode *inode, pgoff_t start, pgoff_t end)
+static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
+	pgoff_t start = offset >> PAGE_SHIFT;
+	pgoff_t end = (offset + len) >> PAGE_SHIFT;
 	int ret;
 
 	f2fs_balance_fs(sbi, true);
+
+	/* avoid gc operation during block exchange */
+	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+	down_write(&F2FS_I(inode)->i_mmap_sem);
+
 	f2fs_lock_op(sbi);
-
 	f2fs_drop_extent_tree(inode);
-
+	truncate_pagecache(inode, offset);
 	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
 	f2fs_unlock_op(sbi);
+
+	up_write(&F2FS_I(inode)->i_mmap_sem);
+	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 	return ret;
 }
 
 static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
 {
-	pgoff_t pg_start, pg_end;
 	loff_t new_size;
 	int ret;
 
@@ -1197,25 +1232,17 @@
 	if (ret)
 		return ret;
 
-	pg_start = offset >> PAGE_SHIFT;
-	pg_end = (offset + len) >> PAGE_SHIFT;
-
-	/* avoid gc operation during block exchange */
-	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-
-	down_write(&F2FS_I(inode)->i_mmap_sem);
 	/* write out all dirty pages from offset */
 	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
 	if (ret)
-		goto out_unlock;
+		return ret;
 
-	truncate_pagecache(inode, offset);
-
-	ret = f2fs_do_collapse(inode, pg_start, pg_end);
+	ret = f2fs_do_collapse(inode, offset, len);
 	if (ret)
-		goto out_unlock;
+		return ret;
 
 	/* write out all moved pages, if possible */
+	down_write(&F2FS_I(inode)->i_mmap_sem);
 	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
 	truncate_pagecache(inode, offset);
 
@@ -1223,11 +1250,9 @@
 	truncate_pagecache(inode, new_size);
 
 	ret = f2fs_truncate_blocks(inode, new_size, true);
+	up_write(&F2FS_I(inode)->i_mmap_sem);
 	if (!ret)
 		f2fs_i_size_write(inode, new_size);
-out_unlock:
-	up_write(&F2FS_I(inode)->i_mmap_sem);
-	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 	return ret;
 }
 
@@ -1293,12 +1318,9 @@
 	if (ret)
 		return ret;
 
-	down_write(&F2FS_I(inode)->i_mmap_sem);
 	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
 	if (ret)
-		goto out_sem;
-
-	truncate_pagecache_range(inode, offset, offset + len - 1);
+		return ret;
 
 	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
 	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
@@ -1310,7 +1332,7 @@
 		ret = fill_zero(inode, pg_start, off_start,
 						off_end - off_start);
 		if (ret)
-			goto out_sem;
+			return ret;
 
 		new_size = max_t(loff_t, new_size, offset + len);
 	} else {
@@ -1318,7 +1340,7 @@
 			ret = fill_zero(inode, pg_start++, off_start,
 						PAGE_SIZE - off_start);
 			if (ret)
-				goto out_sem;
+				return ret;
 
 			new_size = max_t(loff_t, new_size,
 					(loff_t)pg_start << PAGE_SHIFT);
@@ -1329,12 +1351,21 @@
 			unsigned int end_offset;
 			pgoff_t end;
 
+			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+			down_write(&F2FS_I(inode)->i_mmap_sem);
+
+			truncate_pagecache_range(inode,
+				(loff_t)index << PAGE_SHIFT,
+				((loff_t)pg_end << PAGE_SHIFT) - 1);
+
 			f2fs_lock_op(sbi);
 
 			set_new_dnode(&dn, inode, NULL, NULL, 0);
 			ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
 			if (ret) {
 				f2fs_unlock_op(sbi);
+				up_write(&F2FS_I(inode)->i_mmap_sem);
+				up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 				goto out;
 			}
 
@@ -1343,7 +1374,10 @@
 
 			ret = f2fs_do_zero_range(&dn, index, end);
 			f2fs_put_dnode(&dn);
+
 			f2fs_unlock_op(sbi);
+			up_write(&F2FS_I(inode)->i_mmap_sem);
+			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 
 			f2fs_balance_fs(sbi, dn.node_changed);
 
@@ -1371,9 +1405,6 @@
 		else
 			f2fs_i_size_write(inode, new_size);
 	}
-out_sem:
-	up_write(&F2FS_I(inode)->i_mmap_sem);
-
 	return ret;
 }
 
@@ -1402,26 +1433,27 @@
 
 	f2fs_balance_fs(sbi, true);
 
-	/* avoid gc operation during block exchange */
-	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-
 	down_write(&F2FS_I(inode)->i_mmap_sem);
 	ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
+	up_write(&F2FS_I(inode)->i_mmap_sem);
 	if (ret)
-		goto out;
+		return ret;
 
 	/* write out all dirty pages from offset */
 	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
 	if (ret)
-		goto out;
-
-	truncate_pagecache(inode, offset);
+		return ret;
 
 	pg_start = offset >> PAGE_SHIFT;
 	pg_end = (offset + len) >> PAGE_SHIFT;
 	delta = pg_end - pg_start;
 	idx = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
 
+	/* avoid gc operation during block exchange */
+	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+	down_write(&F2FS_I(inode)->i_mmap_sem);
+	truncate_pagecache(inode, offset);
+
 	while (!ret && idx > pg_start) {
 		nr = idx - pg_start;
 		if (nr > delta)
@@ -1435,16 +1467,17 @@
 					idx + delta, nr, false);
 		f2fs_unlock_op(sbi);
 	}
+	up_write(&F2FS_I(inode)->i_mmap_sem);
+	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 
 	/* write out all moved pages, if possible */
+	down_write(&F2FS_I(inode)->i_mmap_sem);
 	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
 	truncate_pagecache(inode, offset);
+	up_write(&F2FS_I(inode)->i_mmap_sem);
 
 	if (!ret)
 		f2fs_i_size_write(inode, new_size);
-out:
-	up_write(&F2FS_I(inode)->i_mmap_sem);
-	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 	return ret;
 }
 
@@ -1600,7 +1633,7 @@
 	struct f2fs_inode_info *fi = F2FS_I(inode);
 	unsigned int flags = fi->i_flags;
 
-	if (file_is_encrypt(inode))
+	if (f2fs_encrypted_inode(inode))
 		flags |= F2FS_ENCRYPT_FL;
 	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
 		flags |= F2FS_INLINE_DATA_FL;
@@ -1684,15 +1717,18 @@
 
 	inode_lock(inode);
 
-	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-
-	if (f2fs_is_atomic_file(inode))
+	if (f2fs_is_atomic_file(inode)) {
+		if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
+			ret = -EINVAL;
 		goto out;
+	}
 
 	ret = f2fs_convert_inline_inode(inode);
 	if (ret)
 		goto out;
 
+	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+
 	if (!get_dirty_pages(inode))
 		goto skip_flush;
 
@@ -1700,18 +1736,20 @@
 		"Unexpected flush for atomic writes: ino=%lu, npages=%u",
 					inode->i_ino, get_dirty_pages(inode));
 	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
-	if (ret)
+	if (ret) {
+		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 		goto out;
+	}
 skip_flush:
 	set_inode_flag(inode, FI_ATOMIC_FILE);
 	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
-	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
+	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 
+	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
 	F2FS_I(inode)->inmem_task = current;
 	stat_inc_atomic_write(inode);
 	stat_update_max_atomic_write(inode);
 out:
-	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 	inode_unlock(inode);
 	mnt_drop_write_file(filp);
 	return ret;
@@ -1729,9 +1767,9 @@
 	if (ret)
 		return ret;
 
-	inode_lock(inode);
+	f2fs_balance_fs(F2FS_I_SB(inode), true);
 
-	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+	inode_lock(inode);
 
 	if (f2fs_is_volatile_file(inode)) {
 		ret = -EINVAL;
@@ -1757,7 +1795,6 @@
 		clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
 		ret = -EINVAL;
 	}
-	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 	inode_unlock(inode);
 	mnt_drop_write_file(filp);
 	return ret;
@@ -1849,6 +1886,8 @@
 		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
 	}
 
+	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
+
 	inode_unlock(inode);
 
 	mnt_drop_write_file(filp);
@@ -1885,6 +1924,7 @@
 		}
 		if (sb) {
 			f2fs_stop_checkpoint(sbi, false);
+			set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
 			thaw_bdev(sb->s_bdev, sb);
 		}
 		break;
@@ -1894,13 +1934,16 @@
 		if (ret)
 			goto out;
 		f2fs_stop_checkpoint(sbi, false);
+		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
 		break;
 	case F2FS_GOING_DOWN_NOSYNC:
 		f2fs_stop_checkpoint(sbi, false);
+		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
 		break;
 	case F2FS_GOING_DOWN_METAFLUSH:
 		f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
 		f2fs_stop_checkpoint(sbi, false);
+		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
 		break;
 	default:
 		ret = -EINVAL;
@@ -2103,7 +2146,7 @@
 	return ret;
 }
 
-static int f2fs_ioc_f2fs_write_checkpoint(struct file *filp, unsigned long arg)
+static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
 {
 	struct inode *inode = file_inode(filp);
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
@@ -2347,15 +2390,10 @@
 	}
 
 	inode_lock(src);
-	down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
 	if (src != dst) {
 		ret = -EBUSY;
 		if (!inode_trylock(dst))
 			goto out;
-		if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE])) {
-			inode_unlock(dst);
-			goto out;
-		}
 	}
 
 	ret = -EINVAL;
@@ -2400,6 +2438,14 @@
 		goto out_unlock;
 
 	f2fs_balance_fs(sbi, true);
+
+	down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
+	if (src != dst) {
+		ret = -EBUSY;
+		if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
+			goto out_src;
+	}
+
 	f2fs_lock_op(sbi);
 	ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
 				pos_out >> F2FS_BLKSIZE_BITS,
@@ -2412,13 +2458,15 @@
 			f2fs_i_size_write(dst, dst_osize);
 	}
 	f2fs_unlock_op(sbi);
-out_unlock:
-	if (src != dst) {
+
+	if (src != dst)
 		up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
-		inode_unlock(dst);
-	}
-out:
+out_src:
 	up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
+out_unlock:
+	if (src != dst)
+		inode_unlock(dst);
+out:
 	inode_unlock(src);
 	return ret;
 }
@@ -2590,7 +2638,7 @@
 
 	if (!pin) {
 		clear_inode_flag(inode, FI_PIN_FILE);
-		F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] = 1;
+		f2fs_i_gc_failures_write(inode, 0);
 		goto done;
 	}
 
@@ -2696,7 +2744,7 @@
 	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
 		return f2fs_ioc_gc_range(filp, arg);
 	case F2FS_IOC_WRITE_CHECKPOINT:
-		return f2fs_ioc_f2fs_write_checkpoint(filp, arg);
+		return f2fs_ioc_write_checkpoint(filp, arg);
 	case F2FS_IOC_DEFRAGMENT:
 		return f2fs_ioc_defragment(filp, arg);
 	case F2FS_IOC_MOVE_RANGE:
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 9be5b50..c6322ef 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -53,12 +53,10 @@
 			continue;
 		}
 
-#ifdef CONFIG_F2FS_FAULT_INJECTION
 		if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
 			f2fs_show_injection_info(FAULT_CHECKPOINT);
 			f2fs_stop_checkpoint(sbi, false);
 		}
-#endif
 
 		if (!sb_start_write_trylock(sbi->sb))
 			continue;
@@ -517,7 +515,11 @@
 			continue;
 		}
 
-		f2fs_get_node_info(sbi, nid, &ni);
+		if (f2fs_get_node_info(sbi, nid, &ni)) {
+			f2fs_put_page(node_page, 1);
+			continue;
+		}
+
 		if (ni.blk_addr != start_addr + off) {
 			f2fs_put_page(node_page, 1);
 			continue;
@@ -576,7 +578,10 @@
 	if (IS_ERR(node_page))
 		return false;
 
-	f2fs_get_node_info(sbi, nid, dni);
+	if (f2fs_get_node_info(sbi, nid, dni)) {
+		f2fs_put_page(node_page, 1);
+		return false;
+	}
 
 	if (sum->version != dni->version) {
 		f2fs_msg(sbi->sb, KERN_WARNING,
@@ -594,6 +599,72 @@
 	return true;
 }
 
+static int ra_data_block(struct inode *inode, pgoff_t index)
+{
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	struct address_space *mapping = inode->i_mapping;
+	struct dnode_of_data dn;
+	struct page *page;
+	struct extent_info ei = {0, 0, 0};
+	struct f2fs_io_info fio = {
+		.sbi = sbi,
+		.ino = inode->i_ino,
+		.type = DATA,
+		.temp = COLD,
+		.op = REQ_OP_READ,
+		.op_flags = 0,
+		.encrypted_page = NULL,
+		.in_list = false,
+		.retry = false,
+	};
+	int err;
+
+	page = f2fs_grab_cache_page(mapping, index, true);
+	if (!page)
+		return -ENOMEM;
+
+	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
+		dn.data_blkaddr = ei.blk + index - ei.fofs;
+		goto got_it;
+	}
+
+	set_new_dnode(&dn, inode, NULL, NULL, 0);
+	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
+	if (err)
+		goto put_page;
+	f2fs_put_dnode(&dn);
+
+	if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
+						DATA_GENERIC))) {
+		err = -EFAULT;
+		goto put_page;
+	}
+got_it:
+	/* read page */
+	fio.page = page;
+	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
+
+	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
+					dn.data_blkaddr,
+					FGP_LOCK | FGP_CREAT, GFP_NOFS);
+	if (!fio.encrypted_page) {
+		err = -ENOMEM;
+		goto put_page;
+	}
+
+	err = f2fs_submit_page_bio(&fio);
+	if (err)
+		goto put_encrypted_page;
+	f2fs_put_page(fio.encrypted_page, 0);
+	f2fs_put_page(page, 1);
+	return 0;
+put_encrypted_page:
+	f2fs_put_page(fio.encrypted_page, 1);
+put_page:
+	f2fs_put_page(page, 1);
+	return err;
+}
+
 /*
  * Move data block via META_MAPPING while keeping locked data page.
  * This can be used to move blocks, aka LBAs, directly on disk.
@@ -615,7 +686,7 @@
 	struct dnode_of_data dn;
 	struct f2fs_summary sum;
 	struct node_info ni;
-	struct page *page;
+	struct page *page, *mpage;
 	block_t newaddr;
 	int err;
 	bool lfs_mode = test_opt(fio.sbi, LFS);
@@ -655,7 +726,10 @@
 	 */
 	f2fs_wait_on_page_writeback(page, DATA, true);
 
-	f2fs_get_node_info(fio.sbi, dn.nid, &ni);
+	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
+	if (err)
+		goto put_out;
+
 	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);
 
 	/* read page */
@@ -675,6 +749,23 @@
 		goto recover_block;
 	}
 
+	mpage = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
+					fio.old_blkaddr, FGP_LOCK, GFP_NOFS);
+	if (mpage) {
+		bool updated = false;
+
+		if (PageUptodate(mpage)) {
+			memcpy(page_address(fio.encrypted_page),
+					page_address(mpage), PAGE_SIZE);
+			updated = true;
+		}
+		f2fs_put_page(mpage, 1);
+		invalidate_mapping_pages(META_MAPPING(fio.sbi),
+					fio.old_blkaddr, fio.old_blkaddr);
+		if (updated)
+			goto write_page;
+	}
+
 	err = f2fs_submit_page_bio(&fio);
 	if (err)
 		goto put_page_out;
@@ -691,6 +782,7 @@
 		goto put_page_out;
 	}
 
+write_page:
 	set_page_dirty(fio.encrypted_page);
 	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true);
 	if (clear_page_dirty_for_io(fio.encrypted_page))
@@ -865,22 +957,30 @@
 			if (IS_ERR(inode) || is_bad_inode(inode))
 				continue;
 
-			/* if inode uses special I/O path, let's go phase 3 */
+			if (!down_write_trylock(
+				&F2FS_I(inode)->i_gc_rwsem[WRITE])) {
+				iput(inode);
+				sbi->skipped_gc_rwsem++;
+				continue;
+			}
+
+			start_bidx = f2fs_start_bidx_of_node(nofs, inode) +
+								ofs_in_node;
+
 			if (f2fs_post_read_required(inode)) {
+				int err = ra_data_block(inode, start_bidx);
+
+				up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+				if (err) {
+					iput(inode);
+					continue;
+				}
 				add_gc_inode(gc_list, inode);
 				continue;
 			}
 
-			if (!down_write_trylock(
-				&F2FS_I(inode)->i_gc_rwsem[WRITE])) {
-				iput(inode);
-				continue;
-			}
-
-			start_bidx = f2fs_start_bidx_of_node(nofs, inode);
 			data_page = f2fs_get_read_data_page(inode,
-					start_bidx + ofs_in_node, REQ_RAHEAD,
-					true);
+						start_bidx, REQ_RAHEAD, true);
 			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 			if (IS_ERR(data_page)) {
 				iput(inode);
@@ -903,6 +1003,7 @@
 					continue;
 				if (!down_write_trylock(
 						&fi->i_gc_rwsem[WRITE])) {
+					sbi->skipped_gc_rwsem++;
 					up_write(&fi->i_gc_rwsem[READ]);
 					continue;
 				}
@@ -1040,6 +1141,7 @@
 		.iroot = RADIX_TREE_INIT(GFP_NOFS),
 	};
 	unsigned long long last_skipped = sbi->skipped_atomic_files[FG_GC];
+	unsigned long long first_skipped;
 	unsigned int skipped_round = 0, round = 0;
 
 	trace_f2fs_gc_begin(sbi->sb, sync, background,
@@ -1052,6 +1154,8 @@
 				prefree_segments(sbi));
 
 	cpc.reason = __get_cp_reason(sbi);
+	sbi->skipped_gc_rwsem = 0;
+	first_skipped = last_skipped;
 gc_more:
 	if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE))) {
 		ret = -EINVAL;
@@ -1093,7 +1197,8 @@
 	total_freed += seg_freed;
 
 	if (gc_type == FG_GC) {
-		if (sbi->skipped_atomic_files[FG_GC] > last_skipped)
+		if (sbi->skipped_atomic_files[FG_GC] > last_skipped ||
+						sbi->skipped_gc_rwsem)
 			skipped_round++;
 		last_skipped = sbi->skipped_atomic_files[FG_GC];
 		round++;
@@ -1102,15 +1207,23 @@
 	if (gc_type == FG_GC)
 		sbi->cur_victim_sec = NULL_SEGNO;
 
-	if (!sync) {
-		if (has_not_enough_free_secs(sbi, sec_freed, 0)) {
-			if (skipped_round > MAX_SKIP_ATOMIC_COUNT &&
-				skipped_round * 2 >= round)
-				f2fs_drop_inmem_pages_all(sbi, true);
+	if (sync)
+		goto stop;
+
+	if (has_not_enough_free_secs(sbi, sec_freed, 0)) {
+		if (skipped_round <= MAX_SKIP_GC_COUNT ||
+					skipped_round * 2 < round) {
 			segno = NULL_SEGNO;
 			goto gc_more;
 		}
 
+		if (first_skipped < last_skipped &&
+				(last_skipped - first_skipped) >
+						sbi->skipped_gc_rwsem) {
+			f2fs_drop_inmem_pages_all(sbi, true);
+			segno = NULL_SEGNO;
+			goto gc_more;
+		}
 		if (gc_type == FG_GC)
 			ret = f2fs_write_checkpoint(sbi, &cpc);
 	}
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index dd4cde8..df71d26 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -139,6 +139,7 @@
 		.encrypted_page = NULL,
 		.io_type = FS_DATA_IO,
 	};
+	struct node_info ni;
 	int dirty, err;
 
 	if (!f2fs_exist_data(dn->inode))
@@ -148,6 +149,14 @@
 	if (err)
 		return err;
 
+	err = f2fs_get_node_info(fio.sbi, dn->nid, &ni);
+	if (err) {
+		f2fs_put_dnode(dn);
+		return err;
+	}
+
+	fio.version = ni.version;
+
 	if (unlikely(dn->data_blkaddr != NEW_ADDR)) {
 		f2fs_put_dnode(dn);
 		set_sbi_flag(fio.sbi, SBI_NEED_FSCK);
@@ -516,6 +525,7 @@
 	return 0;
 recover:
 	lock_page(ipage);
+	f2fs_wait_on_page_writeback(ipage, NODE, true);
 	memcpy(inline_dentry, backup_dentry, MAX_INLINE_DATA(dir));
 	f2fs_i_depth_write(dir, 0);
 	f2fs_i_size_write(dir, MAX_INLINE_DATA(dir));
@@ -707,7 +717,10 @@
 		ilen = start + len;
 	ilen -= start;
 
-	f2fs_get_node_info(F2FS_I_SB(inode), inode->i_ino, &ni);
+	err = f2fs_get_node_info(F2FS_I_SB(inode), inode->i_ino, &ni);
+	if (err)
+		goto out;
+
 	byteaddr = (__u64)ni.blk_addr << inode->i_sb->s_blocksize_bits;
 	byteaddr += (char *)inline_data_addr(inode, ipage) -
 					(char *)F2FS_INODE(ipage);
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index 30a7773..959df22 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -68,13 +68,16 @@
 	}
 }
 
-static bool __written_first_block(struct f2fs_inode *ri)
+static int __written_first_block(struct f2fs_sb_info *sbi,
+					struct f2fs_inode *ri)
 {
 	block_t addr = le32_to_cpu(ri->i_addr[offset_in_addr(ri)]);
 
-	if (is_valid_blkaddr(addr))
-		return true;
-	return false;
+	if (!__is_valid_data_blkaddr(addr))
+		return 1;
+	if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC))
+		return -EFAULT;
+	return 0;
 }
 
 static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
@@ -121,7 +124,7 @@
 	if (!f2fs_sb_has_inode_chksum(sbi->sb))
 		return false;
 
-	if (!RAW_IS_INODE(F2FS_NODE(page)) || !(ri->i_inline & F2FS_EXTRA_ATTR))
+	if (!IS_INODE(page) || !(ri->i_inline & F2FS_EXTRA_ATTR))
 		return false;
 
 	if (!F2FS_FITS_IN_INODE(ri, le16_to_cpu(ri->i_extra_isize),
@@ -159,8 +162,15 @@
 	struct f2fs_inode *ri;
 	__u32 provided, calculated;
 
+	if (unlikely(is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)))
+		return true;
+
+#ifdef CONFIG_F2FS_CHECK_FS
+	if (!f2fs_enable_inode_chksum(sbi, page))
+#else
 	if (!f2fs_enable_inode_chksum(sbi, page) ||
 			PageDirty(page) || PageWriteback(page))
+#endif
 		return true;
 
 	ri = &F2FS_NODE(page)->i;
@@ -185,9 +195,31 @@
 	ri->i_inode_checksum = cpu_to_le32(f2fs_inode_chksum(sbi, page));
 }
 
-static bool sanity_check_inode(struct inode *inode)
+static bool sanity_check_inode(struct inode *inode, struct page *node_page)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	struct f2fs_inode_info *fi = F2FS_I(inode);
+	unsigned long long iblocks;
+
+	iblocks = le64_to_cpu(F2FS_INODE(node_page)->i_blocks);
+	if (!iblocks) {
+		set_sbi_flag(sbi, SBI_NEED_FSCK);
+		f2fs_msg(sbi->sb, KERN_WARNING,
+			"%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, "
+			"run fsck to fix.",
+			__func__, inode->i_ino, iblocks);
+		return false;
+	}
+
+	if (ino_of_node(node_page) != nid_of_node(node_page)) {
+		set_sbi_flag(sbi, SBI_NEED_FSCK);
+		f2fs_msg(sbi->sb, KERN_WARNING,
+			"%s: corrupted inode footer i_ino=%lx, ino,nid: "
+			"[%u, %u] run fsck to fix.",
+			__func__, inode->i_ino,
+			ino_of_node(node_page), nid_of_node(node_page));
+		return false;
+	}
 
 	if (f2fs_sb_has_flexible_inline_xattr(sbi->sb)
 			&& !f2fs_has_extra_attr(inode)) {
@@ -197,6 +229,64 @@
 			__func__, inode->i_ino);
 		return false;
 	}
+
+	if (f2fs_has_extra_attr(inode) &&
+			!f2fs_sb_has_extra_attr(sbi->sb)) {
+		set_sbi_flag(sbi, SBI_NEED_FSCK);
+		f2fs_msg(sbi->sb, KERN_WARNING,
+			"%s: inode (ino=%lx) has extra_attr, "
+			"but the extra_attr feature is off",
+			__func__, inode->i_ino);
+		return false;
+	}
+
+	if (fi->i_extra_isize > F2FS_TOTAL_EXTRA_ATTR_SIZE ||
+			fi->i_extra_isize % sizeof(__le32)) {
+		set_sbi_flag(sbi, SBI_NEED_FSCK);
+		f2fs_msg(sbi->sb, KERN_WARNING,
+			"%s: inode (ino=%lx) has corrupted i_extra_isize: %d, "
+			"max: %zu",
+			__func__, inode->i_ino, fi->i_extra_isize,
+			F2FS_TOTAL_EXTRA_ATTR_SIZE);
+		return false;
+	}
+
+	if (F2FS_I(inode)->extent_tree) {
+		struct extent_info *ei = &F2FS_I(inode)->extent_tree->largest;
+
+		if (ei->len &&
+			(!f2fs_is_valid_blkaddr(sbi, ei->blk, DATA_GENERIC) ||
+			!f2fs_is_valid_blkaddr(sbi, ei->blk + ei->len - 1,
+							DATA_GENERIC))) {
+			set_sbi_flag(sbi, SBI_NEED_FSCK);
+			f2fs_msg(sbi->sb, KERN_WARNING,
+				"%s: inode (ino=%lx) extent info [%u, %u, %u] "
+				"is incorrect, run fsck to fix",
+				__func__, inode->i_ino,
+				ei->blk, ei->fofs, ei->len);
+			return false;
+		}
+	}
+
+	if (f2fs_has_inline_data(inode) &&
+			(!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))) {
+		set_sbi_flag(sbi, SBI_NEED_FSCK);
+		f2fs_msg(sbi->sb, KERN_WARNING,
+			"%s: inode (ino=%lx, mode=%u) should not have "
+			"inline_data, run fsck to fix",
+			__func__, inode->i_ino, inode->i_mode);
+		return false;
+	}
+
+	if (f2fs_has_inline_dentry(inode) && !S_ISDIR(inode->i_mode)) {
+		set_sbi_flag(sbi, SBI_NEED_FSCK);
+		f2fs_msg(sbi->sb, KERN_WARNING,
+			"%s: inode (ino=%lx, mode=%u) should not have "
+			"inline_dentry, run fsck to fix",
+			__func__, inode->i_ino, inode->i_mode);
+		return false;
+	}
+
 	return true;
 }
 
@@ -207,6 +297,7 @@
 	struct page *node_page;
 	struct f2fs_inode *ri;
 	projid_t i_projid;
+	int err;
 
 	/* Check if ino is within scope */
 	if (f2fs_check_nid_range(sbi, inode->i_ino))
@@ -268,6 +359,11 @@
 		fi->i_inline_xattr_size = 0;
 	}
 
+	if (!sanity_check_inode(inode, node_page)) {
+		f2fs_put_page(node_page, 1);
+		return -EINVAL;
+	}
+
 	/* check data exist */
 	if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
 		__recover_inline_status(inode, node_page);
@@ -275,8 +371,15 @@
 	/* get rdev by using inline_info */
 	__get_inode_rdev(inode, ri);
 
-	if (__written_first_block(ri))
-		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
+	if (S_ISREG(inode->i_mode)) {
+		err = __written_first_block(sbi, ri);
+		if (err < 0) {
+			f2fs_put_page(node_page, 1);
+			return err;
+		}
+		if (!err)
+			set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
+	}
 
 	if (!f2fs_need_inode_block_update(sbi, inode->i_ino))
 		fi->last_disk_size = inode->i_size;
@@ -330,10 +433,6 @@
 	ret = do_read_inode(inode);
 	if (ret)
 		goto bad_inode;
-	if (!sanity_check_inode(inode)) {
-		ret = -EINVAL;
-		goto bad_inode;
-	}
 make_now:
 	if (ino == F2FS_NODE_INO(sbi)) {
 		inode->i_mapping->a_ops = &f2fs_node_aops;
@@ -474,6 +573,10 @@
 	F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
 	F2FS_I(inode)->i_disk_time[2] = inode->i_mtime;
 	F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime;
+
+#ifdef CONFIG_F2FS_CHECK_FS
+	f2fs_inode_chksum_set(F2FS_I_SB(inode), node_page);
+#endif
 }
 
 void f2fs_update_inode_page(struct inode *inode)
@@ -558,12 +661,11 @@
 	if (F2FS_HAS_BLOCKS(inode))
 		err = f2fs_truncate(inode);
 
-#ifdef CONFIG_F2FS_FAULT_INJECTION
 	if (time_to_inject(sbi, FAULT_EVICT_INODE)) {
 		f2fs_show_injection_info(FAULT_EVICT_INODE);
 		err = -EIO;
 	}
-#endif
+
 	if (!err) {
 		f2fs_lock_op(sbi);
 		err = f2fs_remove_inode_page(inode);
@@ -626,6 +728,7 @@
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	struct node_info ni;
+	int err;
 
 	/*
 	 * clear nlink of inode in order to release resource of inode
@@ -648,10 +751,16 @@
 	 * so we can prevent losing this orphan when encountering checkpoint
 	 * and a subsequent sudden power-off.
 	 */
-	f2fs_get_node_info(sbi, inode->i_ino, &ni);
+	err = f2fs_get_node_info(sbi, inode->i_ino, &ni);
+	if (err) {
+		set_sbi_flag(sbi, SBI_NEED_FSCK);
+		f2fs_msg(sbi->sb, KERN_WARNING,
+			"May lose orphan inode, run fsck to fix.");
+		goto out;
+	}
 
 	if (ni.blk_addr != NULL_ADDR) {
-		int err = f2fs_acquire_orphan_inode(sbi);
+		err = f2fs_acquire_orphan_inode(sbi);
 		if (err) {
 			set_sbi_flag(sbi, SBI_NEED_FSCK);
 			f2fs_msg(sbi->sb, KERN_WARNING,
@@ -664,6 +773,7 @@
 		set_inode_flag(inode, FI_FREE_NID);
 	}
 
+out:
 	f2fs_unlock_op(sbi);
 
 	/* iput will drop the inode object */
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index c53760e..56593b3 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -246,7 +246,7 @@
 		return -EINVAL;
 
 	if (hot) {
-		strncpy(extlist[count], name, strlen(name));
+		memcpy(extlist[count], name, strlen(name));
 		sbi->raw_super->hot_ext_count = hot_count + 1;
 	} else {
 		char buf[F2FS_MAX_EXTENSION][F2FS_EXTENSION_LEN];
@@ -254,7 +254,7 @@
 		memcpy(buf, &extlist[cold_count],
 				F2FS_EXTENSION_LEN * hot_count);
 		memset(extlist[cold_count], 0, F2FS_EXTENSION_LEN);
-		strncpy(extlist[cold_count], name, strlen(name));
+		memcpy(extlist[cold_count], name, strlen(name));
 		memcpy(&extlist[cold_count + 1], buf,
 				F2FS_EXTENSION_LEN * hot_count);
 		sbi->raw_super->extension_count = cpu_to_le32(cold_count + 1);
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index aab2dd2..f213a53 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -28,6 +28,7 @@
 static struct kmem_cache *nat_entry_slab;
 static struct kmem_cache *free_nid_slab;
 static struct kmem_cache *nat_entry_set_slab;
+static struct kmem_cache *fsync_node_entry_slab;
 
 /*
  * Check whether the given nid is within node id range.
@@ -112,25 +113,22 @@
 
 static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
 {
-	pgoff_t index = current_nat_addr(sbi, nid);
-	return f2fs_get_meta_page(sbi, index);
+	return f2fs_get_meta_page_nofail(sbi, current_nat_addr(sbi, nid));
 }
 
 static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
 {
 	struct page *src_page;
 	struct page *dst_page;
-	pgoff_t src_off;
 	pgoff_t dst_off;
 	void *src_addr;
 	void *dst_addr;
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 
-	src_off = current_nat_addr(sbi, nid);
-	dst_off = next_nat_addr(sbi, src_off);
+	dst_off = next_nat_addr(sbi, current_nat_addr(sbi, nid));
 
 	/* get current nat block page with lock */
-	src_page = f2fs_get_meta_page(sbi, src_off);
+	src_page = get_current_nat_page(sbi, nid);
 	dst_page = f2fs_grab_meta_page(sbi, dst_off);
 	f2fs_bug_on(sbi, PageDirty(src_page));
 
@@ -176,14 +174,30 @@
 
 	if (raw_ne)
 		node_info_from_raw_nat(&ne->ni, raw_ne);
+
+	spin_lock(&nm_i->nat_list_lock);
 	list_add_tail(&ne->list, &nm_i->nat_entries);
+	spin_unlock(&nm_i->nat_list_lock);
+
 	nm_i->nat_cnt++;
 	return ne;
 }
 
 static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
 {
-	return radix_tree_lookup(&nm_i->nat_root, n);
+	struct nat_entry *ne;
+
+	ne = radix_tree_lookup(&nm_i->nat_root, n);
+
+	/* for recent accessed nat entry, move it to tail of lru list */
+	if (ne && !get_nat_flag(ne, IS_DIRTY)) {
+		spin_lock(&nm_i->nat_list_lock);
+		if (!list_empty(&ne->list))
+			list_move_tail(&ne->list, &nm_i->nat_entries);
+		spin_unlock(&nm_i->nat_list_lock);
+	}
+
+	return ne;
 }
 
 static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
@@ -194,7 +208,6 @@
 
 static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
 {
-	list_del(&e->list);
 	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
 	nm_i->nat_cnt--;
 	__free_nat_entry(e);
@@ -245,16 +258,21 @@
 	nm_i->dirty_nat_cnt++;
 	set_nat_flag(ne, IS_DIRTY, true);
 refresh_list:
+	spin_lock(&nm_i->nat_list_lock);
 	if (new_ne)
 		list_del_init(&ne->list);
 	else
 		list_move_tail(&ne->list, &head->entry_list);
+	spin_unlock(&nm_i->nat_list_lock);
 }
 
 static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
 		struct nat_entry_set *set, struct nat_entry *ne)
 {
+	spin_lock(&nm_i->nat_list_lock);
 	list_move_tail(&ne->list, &nm_i->nat_entries);
+	spin_unlock(&nm_i->nat_list_lock);
+
 	set_nat_flag(ne, IS_DIRTY, false);
 	set->entry_cnt--;
 	nm_i->dirty_nat_cnt--;
@@ -267,6 +285,72 @@
 							start, nr);
 }
 
+bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page)
+{
+	return NODE_MAPPING(sbi) == page->mapping &&
+			IS_DNODE(page) && is_cold_node(page);
+}
+
+void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi)
+{
+	spin_lock_init(&sbi->fsync_node_lock);
+	INIT_LIST_HEAD(&sbi->fsync_node_list);
+	sbi->fsync_seg_id = 0;
+	sbi->fsync_node_num = 0;
+}
+
+static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi,
+							struct page *page)
+{
+	struct fsync_node_entry *fn;
+	unsigned long flags;
+	unsigned int seq_id;
+
+	fn = f2fs_kmem_cache_alloc(fsync_node_entry_slab, GFP_NOFS);
+
+	get_page(page);
+	fn->page = page;
+	INIT_LIST_HEAD(&fn->list);
+
+	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
+	list_add_tail(&fn->list, &sbi->fsync_node_list);
+	fn->seq_id = sbi->fsync_seg_id++;
+	seq_id = fn->seq_id;
+	sbi->fsync_node_num++;
+	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
+
+	return seq_id;
+}
+
+void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page)
+{
+	struct fsync_node_entry *fn;
+	unsigned long flags;
+
+	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
+	list_for_each_entry(fn, &sbi->fsync_node_list, list) {
+		if (fn->page == page) {
+			list_del(&fn->list);
+			sbi->fsync_node_num--;
+			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
+			kmem_cache_free(fsync_node_entry_slab, fn);
+			put_page(page);
+			return;
+		}
+	}
+	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
+	f2fs_bug_on(sbi, 1);
+}
+
+void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
+	sbi->fsync_seg_id = 0;
+	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
+}
+
 int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
@@ -371,7 +455,7 @@
 			new_blkaddr == NULL_ADDR);
 	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
 			new_blkaddr == NEW_ADDR);
-	f2fs_bug_on(sbi, is_valid_blkaddr(nat_get_blkaddr(e)) &&
+	f2fs_bug_on(sbi, is_valid_data_blkaddr(sbi, nat_get_blkaddr(e)) &&
 			new_blkaddr == NEW_ADDR);
 
 	/* increment version no as node is removed */
@@ -382,7 +466,7 @@
 
 	/* change address */
 	nat_set_blkaddr(e, new_blkaddr);
-	if (!is_valid_blkaddr(new_blkaddr))
+	if (!is_valid_data_blkaddr(sbi, new_blkaddr))
 		set_nat_flag(e, IS_CHECKPOINTED, false);
 	__set_nat_cache_dirty(nm_i, e);
 
@@ -405,13 +489,25 @@
 	if (!down_write_trylock(&nm_i->nat_tree_lock))
 		return 0;
 
-	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
+	spin_lock(&nm_i->nat_list_lock);
+	while (nr_shrink) {
 		struct nat_entry *ne;
+
+		if (list_empty(&nm_i->nat_entries))
+			break;
+
 		ne = list_first_entry(&nm_i->nat_entries,
 					struct nat_entry, list);
+		list_del(&ne->list);
+		spin_unlock(&nm_i->nat_list_lock);
+
 		__del_from_nat_cache(nm_i, ne);
 		nr_shrink--;
+
+		spin_lock(&nm_i->nat_list_lock);
 	}
+	spin_unlock(&nm_i->nat_list_lock);
+
 	up_write(&nm_i->nat_tree_lock);
 	return nr - nr_shrink;
 }
@@ -419,7 +515,7 @@
 /*
  * This function always returns success
  */
-void f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
+int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
 						struct node_info *ni)
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
@@ -443,7 +539,7 @@
 		ni->blk_addr = nat_get_blkaddr(e);
 		ni->version = nat_get_version(e);
 		up_read(&nm_i->nat_tree_lock);
-		return;
+		return 0;
 	}
 
 	memset(&ne, 0, sizeof(struct f2fs_nat_entry));
@@ -466,6 +562,9 @@
 	up_read(&nm_i->nat_tree_lock);
 
 	page = f2fs_get_meta_page(sbi, index);
+	if (IS_ERR(page))
+		return PTR_ERR(page);
+
 	nat_blk = (struct f2fs_nat_block *)page_address(page);
 	ne = nat_blk->entries[nid - start_nid];
 	node_info_from_raw_nat(ni, &ne);
@@ -473,6 +572,7 @@
 cache:
 	/* cache nat entry */
 	cache_nat_entry(sbi, nid, &ne);
+	return 0;
 }
 
 /*
@@ -722,12 +822,15 @@
 	return err;
 }
 
-static void truncate_node(struct dnode_of_data *dn)
+static int truncate_node(struct dnode_of_data *dn)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
 	struct node_info ni;
+	int err;
 
-	f2fs_get_node_info(sbi, dn->nid, &ni);
+	err = f2fs_get_node_info(sbi, dn->nid, &ni);
+	if (err)
+		return err;
 
 	/* Deallocate node address */
 	f2fs_invalidate_blocks(sbi, ni.blk_addr);
@@ -750,11 +853,14 @@
 
 	dn->node_page = NULL;
 	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
+
+	return 0;
 }
 
 static int truncate_dnode(struct dnode_of_data *dn)
 {
 	struct page *page;
+	int err;
 
 	if (dn->nid == 0)
 		return 1;
@@ -770,7 +876,10 @@
 	dn->node_page = page;
 	dn->ofs_in_node = 0;
 	f2fs_truncate_data_blocks(dn);
-	truncate_node(dn);
+	err = truncate_node(dn);
+	if (err)
+		return err;
+
 	return 1;
 }
 
@@ -835,7 +944,9 @@
 	if (!ofs) {
 		/* remove current indirect node */
 		dn->node_page = page;
-		truncate_node(dn);
+		ret = truncate_node(dn);
+		if (ret)
+			goto out_err;
 		freed++;
 	} else {
 		f2fs_put_page(page, 1);
@@ -893,7 +1004,9 @@
 	if (offset[idx + 1] == 0) {
 		dn->node_page = pages[idx];
 		dn->nid = nid[idx];
-		truncate_node(dn);
+		err = truncate_node(dn);
+		if (err)
+			goto fail;
 	} else {
 		f2fs_put_page(pages[idx], 1);
 	}
@@ -1014,6 +1127,7 @@
 	nid_t nid = F2FS_I(inode)->i_xattr_nid;
 	struct dnode_of_data dn;
 	struct page *npage;
+	int err;
 
 	if (!nid)
 		return 0;
@@ -1022,10 +1136,15 @@
 	if (IS_ERR(npage))
 		return PTR_ERR(npage);
 
+	set_new_dnode(&dn, inode, NULL, npage, nid);
+	err = truncate_node(&dn);
+	if (err) {
+		f2fs_put_page(npage, 1);
+		return err;
+	}
+
 	f2fs_i_xnid_write(inode, 0);
 
-	set_new_dnode(&dn, inode, NULL, npage, nid);
-	truncate_node(&dn);
 	return 0;
 }
 
@@ -1055,11 +1174,19 @@
 		f2fs_truncate_data_blocks_range(&dn, 1);
 
 	/* 0 is possible, after f2fs_new_inode() has failed */
+	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
+		f2fs_put_dnode(&dn);
+		return -EIO;
+	}
 	f2fs_bug_on(F2FS_I_SB(inode),
 			inode->i_blocks != 0 && inode->i_blocks != 8);
 
 	/* will put inode & node pages */
-	truncate_node(&dn);
+	err = truncate_node(&dn);
+	if (err) {
+		f2fs_put_dnode(&dn);
+		return err;
+	}
 	return 0;
 }
 
@@ -1092,7 +1219,11 @@
 		goto fail;
 
 #ifdef CONFIG_F2FS_CHECK_FS
-	f2fs_get_node_info(sbi, dn->nid, &new_ni);
+	err = f2fs_get_node_info(sbi, dn->nid, &new_ni);
+	if (err) {
+		dec_valid_node_count(sbi, dn->inode, !ofs);
+		goto fail;
+	}
 	f2fs_bug_on(sbi, new_ni.blk_addr != NULL_ADDR);
 #endif
 	new_ni.nid = dn->nid;
@@ -1140,13 +1271,21 @@
 		.page = page,
 		.encrypted_page = NULL,
 	};
+	int err;
 
-	if (PageUptodate(page))
+	if (PageUptodate(page)) {
+#ifdef CONFIG_F2FS_CHECK_FS
+		f2fs_bug_on(sbi, !f2fs_inode_chksum_verify(sbi, page));
+#endif
 		return LOCKED_PAGE;
+	}
 
-	f2fs_get_node_info(sbi, page->index, &ni);
+	err = f2fs_get_node_info(sbi, page->index, &ni);
+	if (err)
+		return err;
 
-	if (unlikely(ni.blk_addr == NULL_ADDR)) {
+	if (unlikely(ni.blk_addr == NULL_ADDR) ||
+			is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)) {
 		ClearPageUptodate(page);
 		return -ENOENT;
 	}
@@ -1348,7 +1487,7 @@
 
 static int __write_node_page(struct page *page, bool atomic, bool *submitted,
 				struct writeback_control *wbc, bool do_balance,
-				enum iostat_type io_type)
+				enum iostat_type io_type, unsigned int *seq_id)
 {
 	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
 	nid_t nid;
@@ -1365,6 +1504,7 @@
 		.io_type = io_type,
 		.io_wbc = wbc,
 	};
+	unsigned int seq;
 
 	trace_f2fs_writepage(page, NODE);
 
@@ -1374,10 +1514,17 @@
 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
 		goto redirty_out;
 
+	if (wbc->sync_mode == WB_SYNC_NONE &&
+			IS_DNODE(page) && is_cold_node(page))
+		goto redirty_out;
+
 	/* get old block addr of this node page */
 	nid = nid_of_node(page);
 	f2fs_bug_on(sbi, page->index != nid);
 
+	if (f2fs_get_node_info(sbi, nid, &ni))
+		goto redirty_out;
+
 	if (wbc->for_reclaim) {
 		if (!down_read_trylock(&sbi->node_write))
 			goto redirty_out;
@@ -1385,8 +1532,6 @@
 		down_read(&sbi->node_write);
 	}
 
-	f2fs_get_node_info(sbi, nid, &ni);
-
 	/* This page is already truncated */
 	if (unlikely(ni.blk_addr == NULL_ADDR)) {
 		ClearPageUptodate(page);
@@ -1396,11 +1541,22 @@
 		return 0;
 	}
 
+	if (__is_valid_data_blkaddr(ni.blk_addr) &&
+		!f2fs_is_valid_blkaddr(sbi, ni.blk_addr, DATA_GENERIC))
+		goto redirty_out;
+
 	if (atomic && !test_opt(sbi, NOBARRIER))
 		fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
 
 	set_page_writeback(page);
 	ClearPageError(page);
+
+	if (f2fs_in_warm_node_list(sbi, page)) {
+		seq = f2fs_add_fsync_node_entry(sbi, page);
+		if (seq_id)
+			*seq_id = seq;
+	}
+
 	fio.old_blkaddr = ni.blk_addr;
 	f2fs_do_write_node_page(nid, &fio);
 	set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
@@ -1448,7 +1604,7 @@
 			goto out_page;
 
 		if (__write_node_page(node_page, false, NULL,
-					&wbc, false, FS_GC_NODE_IO))
+					&wbc, false, FS_GC_NODE_IO, NULL))
 			unlock_page(node_page);
 		goto release_page;
 	} else {
@@ -1465,11 +1621,13 @@
 static int f2fs_write_node_page(struct page *page,
 				struct writeback_control *wbc)
 {
-	return __write_node_page(page, false, NULL, wbc, false, FS_NODE_IO);
+	return __write_node_page(page, false, NULL, wbc, false,
+						FS_NODE_IO, NULL);
 }
 
 int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
-			struct writeback_control *wbc, bool atomic)
+			struct writeback_control *wbc, bool atomic,
+			unsigned int *seq_id)
 {
 	pgoff_t index;
 	pgoff_t last_idx = ULONG_MAX;
@@ -1550,7 +1708,7 @@
 			ret = __write_node_page(page, atomic &&
 						page == last_page,
 						&submitted, wbc, true,
-						FS_NODE_IO);
+						FS_NODE_IO, seq_id);
 			if (ret) {
 				unlock_page(page);
 				f2fs_put_page(last_page, 0);
@@ -1667,7 +1825,7 @@
 			set_dentry_mark(page, 0);
 
 			ret = __write_node_page(page, false, &submitted,
-						wbc, do_balance, io_type);
+						wbc, do_balance, io_type, NULL);
 			if (ret)
 				unlock_page(page);
 			else if (submitted)
@@ -1686,10 +1844,12 @@
 	}
 
 	if (step < 2) {
+		if (wbc->sync_mode == WB_SYNC_NONE && step == 1)
+			goto out;
 		step++;
 		goto next_step;
 	}
-
+out:
 	if (nwritten)
 		f2fs_submit_merged_write(sbi, NODE);
 
@@ -1698,35 +1858,46 @@
 	return ret;
 }
 
-int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
+int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
+						unsigned int seq_id)
 {
-	pgoff_t index = 0;
-	struct pagevec pvec;
+	struct fsync_node_entry *fn;
+	struct page *page;
+	struct list_head *head = &sbi->fsync_node_list;
+	unsigned long flags;
+	unsigned int cur_seq_id = 0;
 	int ret2, ret = 0;
-	int nr_pages;
 
-	pagevec_init(&pvec, 0);
-
-	while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
-				PAGECACHE_TAG_WRITEBACK))) {
-		int i;
-
-		for (i = 0; i < nr_pages; i++) {
-			struct page *page = pvec.pages[i];
-
-			if (ino && ino_of_node(page) == ino) {
-				f2fs_wait_on_page_writeback(page, NODE, true);
-				if (TestClearPageError(page))
-					ret = -EIO;
-			}
+	while (seq_id && cur_seq_id < seq_id) {
+		spin_lock_irqsave(&sbi->fsync_node_lock, flags);
+		if (list_empty(head)) {
+			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
+			break;
 		}
-		pagevec_release(&pvec);
-		cond_resched();
+		fn = list_first_entry(head, struct fsync_node_entry, list);
+		if (fn->seq_id > seq_id) {
+			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
+			break;
+		}
+		cur_seq_id = fn->seq_id;
+		page = fn->page;
+		get_page(page);
+		spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
+
+		f2fs_wait_on_page_writeback(page, NODE, true);
+		if (TestClearPageError(page))
+			ret = -EIO;
+
+		put_page(page);
+
+		if (ret)
+			break;
 	}
 
 	ret2 = filemap_check_errors(NODE_MAPPING(sbi));
 	if (!ret)
 		ret = ret2;
+
 	return ret;
 }
 
@@ -1776,6 +1947,10 @@
 
 	if (!PageUptodate(page))
 		SetPageUptodate(page);
+#ifdef CONFIG_F2FS_CHECK_FS
+	if (IS_INODE(page))
+		f2fs_inode_chksum_set(F2FS_P_SB(page), page);
+#endif
 	if (!PageDirty(page)) {
 		__set_page_dirty_nobuffers(page);
 		inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
@@ -1970,7 +2145,7 @@
 		kmem_cache_free(free_nid_slab, i);
 }
 
-static void scan_nat_page(struct f2fs_sb_info *sbi,
+static int scan_nat_page(struct f2fs_sb_info *sbi,
 			struct page *nat_page, nid_t start_nid)
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
@@ -1988,7 +2163,10 @@
 			break;
 
 		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
-		f2fs_bug_on(sbi, blk_addr == NEW_ADDR);
+
+		if (blk_addr == NEW_ADDR)
+			return -EINVAL;
+
 		if (blk_addr == NULL_ADDR) {
 			add_free_nid(sbi, start_nid, true, true);
 		} else {
@@ -1997,6 +2175,8 @@
 			spin_unlock(&NM_I(sbi)->nid_list_lock);
 		}
 	}
+
+	return 0;
 }
 
 static void scan_curseg_cache(struct f2fs_sb_info *sbi)
@@ -2052,11 +2232,11 @@
 	up_read(&nm_i->nat_tree_lock);
 }
 
-static void __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
+static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
 						bool sync, bool mount)
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
-	int i = 0;
+	int i = 0, ret;
 	nid_t nid = nm_i->next_scan_nid;
 
 	if (unlikely(nid >= nm_i->max_nid))
@@ -2064,17 +2244,17 @@
 
 	/* Enough entries */
 	if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
-		return;
+		return 0;
 
 	if (!sync && !f2fs_available_free_memory(sbi, FREE_NIDS))
-		return;
+		return 0;
 
 	if (!mount) {
 		/* try to find free nids in free_nid_bitmap */
 		scan_free_nid_bits(sbi);
 
 		if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
-			return;
+			return 0;
 	}
 
 	/* readahead nat pages to be scanned */
@@ -2088,8 +2268,16 @@
 						nm_i->nat_block_bitmap)) {
 			struct page *page = get_current_nat_page(sbi, nid);
 
-			scan_nat_page(sbi, page, nid);
+			ret = scan_nat_page(sbi, page, nid);
 			f2fs_put_page(page, 1);
+
+			if (ret) {
+				up_read(&nm_i->nat_tree_lock);
+				f2fs_bug_on(sbi, !mount);
+				f2fs_msg(sbi->sb, KERN_ERR,
+					"NAT is corrupt, run fsck to fix it");
+				return -EINVAL;
+			}
 		}
 
 		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
@@ -2110,13 +2298,19 @@
 
 	f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
 					nm_i->ra_nid_pages, META_NAT, false);
+
+	return 0;
 }
 
-void f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
+int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
 {
+	int ret;
+
 	mutex_lock(&NM_I(sbi)->build_lock);
-	__f2fs_build_free_nids(sbi, sync, mount);
+	ret = __f2fs_build_free_nids(sbi, sync, mount);
 	mutex_unlock(&NM_I(sbi)->build_lock);
+
+	return ret;
 }
 
 /*
@@ -2129,12 +2323,11 @@
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	struct free_nid *i = NULL;
 retry:
-#ifdef CONFIG_F2FS_FAULT_INJECTION
 	if (time_to_inject(sbi, FAULT_ALLOC_NID)) {
 		f2fs_show_injection_info(FAULT_ALLOC_NID);
 		return false;
 	}
-#endif
+
 	spin_lock(&nm_i->nid_list_lock);
 
 	if (unlikely(nm_i->available_nids == 0)) {
@@ -2279,12 +2472,16 @@
 	struct dnode_of_data dn;
 	struct node_info ni;
 	struct page *xpage;
+	int err;
 
 	if (!prev_xnid)
 		goto recover_xnid;
 
 	/* 1: invalidate the previous xattr nid */
-	f2fs_get_node_info(sbi, prev_xnid, &ni);
+	err = f2fs_get_node_info(sbi, prev_xnid, &ni);
+	if (err)
+		return err;
+
 	f2fs_invalidate_blocks(sbi, ni.blk_addr);
 	dec_valid_node_count(sbi, inode, false);
 	set_node_addr(sbi, &ni, NULL_ADDR, false);
@@ -2319,8 +2516,11 @@
 	nid_t ino = ino_of_node(page);
 	struct node_info old_ni, new_ni;
 	struct page *ipage;
+	int err;
 
-	f2fs_get_node_info(sbi, ino, &old_ni);
+	err = f2fs_get_node_info(sbi, ino, &old_ni);
+	if (err)
+		return err;
 
 	if (unlikely(old_ni.blk_addr != NULL_ADDR))
 		return -EINVAL;
@@ -2374,7 +2574,7 @@
 	return 0;
 }
 
-void f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
+int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
 			unsigned int segno, struct f2fs_summary_block *sum)
 {
 	struct f2fs_node *rn;
@@ -2396,6 +2596,9 @@
 		for (idx = addr; idx < addr + nrpages; idx++) {
 			struct page *page = f2fs_get_tmp_page(sbi, idx);
 
+			if (IS_ERR(page))
+				return PTR_ERR(page);
+
 			rn = F2FS_NODE(page);
 			sum_entry->nid = rn->footer.nid;
 			sum_entry->version = 0;
@@ -2407,6 +2610,7 @@
 		invalidate_mapping_pages(META_MAPPING(sbi), addr,
 							addr + nrpages);
 	}
+	return 0;
 }
 
 static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
@@ -2584,6 +2788,13 @@
 	nid_t set_idx = 0;
 	LIST_HEAD(sets);
 
+	/* during unmount, let's flush nat_bits before checking dirty_nat_cnt */
+	if (enabled_nat_bits(sbi, cpc)) {
+		down_write(&nm_i->nat_tree_lock);
+		remove_nats_in_journal(sbi);
+		up_write(&nm_i->nat_tree_lock);
+	}
+
 	if (!nm_i->dirty_nat_cnt)
 		return;
 
@@ -2636,7 +2847,13 @@
 	nat_bits_addr = __start_cp_addr(sbi) + sbi->blocks_per_seg -
 						nm_i->nat_bits_blocks;
 	for (i = 0; i < nm_i->nat_bits_blocks; i++) {
-		struct page *page = f2fs_get_meta_page(sbi, nat_bits_addr++);
+		struct page *page;
+
+		page = f2fs_get_meta_page(sbi, nat_bits_addr++);
+		if (IS_ERR(page)) {
+			disable_nat_bits(sbi, true);
+			return PTR_ERR(page);
+		}
 
 		memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS),
 					page_address(page), F2FS_BLKSIZE);
@@ -2720,6 +2937,7 @@
 	INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
 	INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
 	INIT_LIST_HEAD(&nm_i->nat_entries);
+	spin_lock_init(&nm_i->nat_list_lock);
 
 	mutex_init(&nm_i->build_lock);
 	spin_lock_init(&nm_i->nid_list_lock);
@@ -2764,8 +2982,8 @@
 
 	for (i = 0; i < nm_i->nat_blocks; i++) {
 		nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi,
-				NAT_ENTRY_BITMAP_SIZE_ALIGNED, GFP_KERNEL);
-		if (!nm_i->free_nid_bitmap)
+			f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK), GFP_KERNEL);
+		if (!nm_i->free_nid_bitmap[i])
 			return -ENOMEM;
 	}
 
@@ -2803,8 +3021,7 @@
 	/* load free nid status from nat_bits table */
 	load_free_nid_bitmap(sbi);
 
-	f2fs_build_free_nids(sbi, true, true);
-	return 0;
+	return f2fs_build_free_nids(sbi, true, true);
 }
 
 void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
@@ -2839,8 +3056,13 @@
 		unsigned idx;
 
 		nid = nat_get_nid(natvec[found - 1]) + 1;
-		for (idx = 0; idx < found; idx++)
+		for (idx = 0; idx < found; idx++) {
+			spin_lock(&nm_i->nat_list_lock);
+			list_del(&natvec[idx]->list);
+			spin_unlock(&nm_i->nat_list_lock);
+
 			__del_from_nat_cache(nm_i, natvec[idx]);
+		}
 	}
 	f2fs_bug_on(sbi, nm_i->nat_cnt);
 
@@ -2895,8 +3117,15 @@
 			sizeof(struct nat_entry_set));
 	if (!nat_entry_set_slab)
 		goto destroy_free_nid;
+
+	fsync_node_entry_slab = f2fs_kmem_cache_create("fsync_node_entry",
+			sizeof(struct fsync_node_entry));
+	if (!fsync_node_entry_slab)
+		goto destroy_nat_entry_set;
 	return 0;
 
+destroy_nat_entry_set:
+	kmem_cache_destroy(nat_entry_set_slab);
 destroy_free_nid:
 	kmem_cache_destroy(free_nid_slab);
 destroy_nat_entry:
@@ -2907,6 +3136,7 @@
 
 void f2fs_destroy_node_manager_caches(void)
 {
+	kmem_cache_destroy(fsync_node_entry_slab);
 	kmem_cache_destroy(nat_entry_set_slab);
 	kmem_cache_destroy(free_nid_slab);
 	kmem_cache_destroy(nat_entry_slab);
diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
index b95e49e..0f4db7a 100644
--- a/fs/f2fs/node.h
+++ b/fs/f2fs/node.h
@@ -135,6 +135,11 @@
 	return NM_I(sbi)->nat_cnt >= DEF_NAT_CACHE_THRESHOLD;
 }
 
+static inline bool excess_dirty_nodes(struct f2fs_sb_info *sbi)
+{
+	return get_pages(sbi, F2FS_DIRTY_NODES) >= sbi->blocks_per_seg * 8;
+}
+
 enum mem_type {
 	FREE_NIDS,	/* indicates the free nid list */
 	NAT_ENTRIES,	/* indicates the cached nat entry */
@@ -444,6 +449,10 @@
 	else
 		flag &= ~(0x1 << type);
 	rn->footer.flag = cpu_to_le32(flag);
+
+#ifdef CONFIG_F2FS_CHECK_FS
+	f2fs_inode_chksum_set(F2FS_P_SB(page), page);
+#endif
 }
 #define set_dentry_mark(page, mark)	set_mark(page, mark, DENT_BIT_SHIFT)
 #define set_fsync_mark(page, mark)	set_mark(page, mark, FSYNC_BIT_SHIFT)
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index daf81d4..501bb0f 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -241,8 +241,8 @@
 	struct page *page = NULL;
 	block_t blkaddr;
 	unsigned int loop_cnt = 0;
-	unsigned int free_blocks = sbi->user_block_count -
-					valid_user_blocks(sbi);
+	unsigned int free_blocks = MAIN_SEGS(sbi) * sbi->blocks_per_seg -
+						valid_user_blocks(sbi);
 	int err = 0;
 
 	/* get node pages in the current segment */
@@ -252,10 +252,14 @@
 	while (1) {
 		struct fsync_inode_entry *entry;
 
-		if (!f2fs_is_valid_meta_blkaddr(sbi, blkaddr, META_POR))
+		if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
 			return 0;
 
 		page = f2fs_get_tmp_page(sbi, blkaddr);
+		if (IS_ERR(page)) {
+			err = PTR_ERR(page);
+			break;
+		}
 
 		if (!is_recoverable_dnode(page))
 			break;
@@ -471,7 +475,10 @@
 
 	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);
 
-	f2fs_get_node_info(sbi, dn.nid, &ni);
+	err = f2fs_get_node_info(sbi, dn.nid, &ni);
+	if (err)
+		goto err;
+
 	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
 	f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page));
 
@@ -507,14 +514,13 @@
 		}
 
 		/* dest is valid block, try to recover from src to dest */
-		if (f2fs_is_valid_meta_blkaddr(sbi, dest, META_POR)) {
+		if (f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {
 
 			if (src == NULL_ADDR) {
 				err = f2fs_reserve_new_block(&dn);
-#ifdef CONFIG_F2FS_FAULT_INJECTION
-				while (err)
+				while (err &&
+				       IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION))
 					err = f2fs_reserve_new_block(&dn);
-#endif
 				/* We should not get -ENOSPC */
 				f2fs_bug_on(sbi, err);
 				if (err)
@@ -568,12 +574,16 @@
 	while (1) {
 		struct fsync_inode_entry *entry;
 
-		if (!f2fs_is_valid_meta_blkaddr(sbi, blkaddr, META_POR))
+		if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
 			break;
 
 		f2fs_ra_meta_pages_cond(sbi, blkaddr);
 
 		page = f2fs_get_tmp_page(sbi, blkaddr);
+		if (IS_ERR(page)) {
+			err = PTR_ERR(page);
+			break;
+		}
 
 		if (!is_recoverable_dnode(page)) {
 			f2fs_put_page(page, 1);
@@ -628,7 +638,8 @@
 #endif
 
 	if (s_flags & MS_RDONLY) {
-		f2fs_msg(sbi->sb, KERN_INFO, "orphan cleanup on readonly fs");
+		f2fs_msg(sbi->sb, KERN_INFO,
+				"recover fsync data on readonly fs");
 		sbi->sb->s_flags &= ~MS_RDONLY;
 	}
 
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 52e85a0..1da9a3c 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -250,7 +250,13 @@
 				err = -EAGAIN;
 				goto next;
 			}
-			f2fs_get_node_info(sbi, dn.nid, &ni);
+
+			err = f2fs_get_node_info(sbi, dn.nid, &ni);
+			if (err) {
+				f2fs_put_dnode(&dn);
+				return err;
+			}
+
 			if (cur->old_addr == NEW_ADDR) {
 				f2fs_invalidate_blocks(sbi, dn.data_blkaddr);
 				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
@@ -439,8 +445,10 @@
 	int err;
 
 	f2fs_balance_fs(sbi, true);
-	f2fs_lock_op(sbi);
 
+	down_write(&fi->i_gc_rwsem[WRITE]);
+
+	f2fs_lock_op(sbi);
 	set_inode_flag(inode, FI_ATOMIC_COMMIT);
 
 	mutex_lock(&fi->inmem_lock);
@@ -455,6 +463,8 @@
 	clear_inode_flag(inode, FI_ATOMIC_COMMIT);
 
 	f2fs_unlock_op(sbi);
+	up_write(&fi->i_gc_rwsem[WRITE]);
+
 	return err;
 }
 
@@ -464,12 +474,10 @@
  */
 void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
 {
-#ifdef CONFIG_F2FS_FAULT_INJECTION
 	if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
 		f2fs_show_injection_info(FAULT_CHECKPOINT);
 		f2fs_stop_checkpoint(sbi, false);
 	}
-#endif
 
 	/* balance_fs_bg is able to be pending */
 	if (need && excess_cached_nats(sbi))
@@ -503,7 +511,8 @@
 	else
 		f2fs_build_free_nids(sbi, false, false);
 
-	if (!is_idle(sbi) && !excess_dirty_nats(sbi))
+	if (!is_idle(sbi) &&
+		(!excess_dirty_nats(sbi) && !excess_dirty_nodes(sbi)))
 		return;
 
 	/* checkpoint is the only way to shrink partial cached entries */
@@ -511,6 +520,7 @@
 			!f2fs_available_free_memory(sbi, INO_ENTRIES) ||
 			excess_prefree_segs(sbi) ||
 			excess_dirty_nats(sbi) ||
+			excess_dirty_nodes(sbi) ||
 			f2fs_time_over(sbi, CP_TIME)) {
 		if (test_opt(sbi, DATA_FLUSH)) {
 			struct blk_plug plug;
@@ -831,9 +841,12 @@
 	dc->len = len;
 	dc->ref = 0;
 	dc->state = D_PREP;
+	dc->issuing = 0;
 	dc->error = 0;
 	init_completion(&dc->wait);
 	list_add_tail(&dc->list, pend_list);
+	spin_lock_init(&dc->lock);
+	dc->bio_ref = 0;
 	atomic_inc(&dcc->discard_cmd_cnt);
 	dcc->undiscard_blks += len;
 
@@ -860,7 +873,7 @@
 							struct discard_cmd *dc)
 {
 	if (dc->state == D_DONE)
-		atomic_dec(&dcc->issing_discard);
+		atomic_sub(dc->issuing, &dcc->issing_discard);
 
 	list_del(&dc->list);
 	rb_erase(&dc->rb_node, &dcc->root);
@@ -875,9 +888,17 @@
 							struct discard_cmd *dc)
 {
 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+	unsigned long flags;
 
 	trace_f2fs_remove_discard(dc->bdev, dc->start, dc->len);
 
+	spin_lock_irqsave(&dc->lock, flags);
+	if (dc->bio_ref) {
+		spin_unlock_irqrestore(&dc->lock, flags);
+		return;
+	}
+	spin_unlock_irqrestore(&dc->lock, flags);
+
 	f2fs_bug_on(sbi, dc->ref);
 
 	if (dc->error == -EOPNOTSUPP)
@@ -893,10 +914,17 @@
 static void f2fs_submit_discard_endio(struct bio *bio)
 {
 	struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;
+	unsigned long flags;
 
 	dc->error = bio->bi_error;
-	dc->state = D_DONE;
-	complete_all(&dc->wait);
+
+	spin_lock_irqsave(&dc->lock, flags);
+	dc->bio_ref--;
+	if (!dc->bio_ref && dc->state == D_SUBMIT) {
+		dc->state = D_DONE;
+		complete_all(&dc->wait);
+	}
+	spin_unlock_irqrestore(&dc->lock, flags);
 	bio_put(bio);
 }
 
@@ -934,6 +962,7 @@
 	/* common policy */
 	dpolicy->type = discard_type;
 	dpolicy->sync = true;
+	dpolicy->ordered = false;
 	dpolicy->granularity = granularity;
 
 	dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST;
@@ -945,6 +974,7 @@
 		dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
 		dpolicy->io_aware = true;
 		dpolicy->sync = false;
+		dpolicy->ordered = true;
 		if (utilization(sbi) > DEF_DISCARD_URGENT_UTIL) {
 			dpolicy->granularity = 1;
 			dpolicy->max_interval = DEF_MIN_DISCARD_ISSUE_TIME;
@@ -962,48 +992,115 @@
 	}
 }
 
-
+static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
+				struct block_device *bdev, block_t lstart,
+				block_t start, block_t len);
 /* this function is copied from blkdev_issue_discard from block/blk-lib.c */
-static void __submit_discard_cmd(struct f2fs_sb_info *sbi,
+static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
 						struct discard_policy *dpolicy,
-						struct discard_cmd *dc)
+						struct discard_cmd *dc,
+						unsigned int *issued)
 {
+	struct block_device *bdev = dc->bdev;
+	struct request_queue *q = bdev_get_queue(bdev);
+	unsigned int max_discard_blocks =
+			SECTOR_TO_BLOCK(q->limits.max_discard_sectors);
 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
 	struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
 					&(dcc->fstrim_list) : &(dcc->wait_list);
-	struct bio *bio = NULL;
 	int flag = dpolicy->sync ? REQ_SYNC : 0;
+	block_t lstart, start, len, total_len;
+	int err = 0;
 
 	if (dc->state != D_PREP)
-		return;
+		return 0;
 
 	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
-		return;
+		return 0;
 
-	trace_f2fs_issue_discard(dc->bdev, dc->start, dc->len);
+	trace_f2fs_issue_discard(bdev, dc->start, dc->len);
 
-	dc->error = __blkdev_issue_discard(dc->bdev,
-				SECTOR_FROM_BLOCK(dc->start),
-				SECTOR_FROM_BLOCK(dc->len),
-				GFP_NOFS, 0, &bio);
-	if (!dc->error) {
-		/* should keep before submission to avoid D_DONE right away */
-		dc->state = D_SUBMIT;
-		atomic_inc(&dcc->issued_discard);
-		atomic_inc(&dcc->issing_discard);
-		if (bio) {
-			bio->bi_private = dc;
-			bio->bi_end_io = f2fs_submit_discard_endio;
-			bio->bi_opf |= flag;
-			submit_bio(bio);
-			list_move_tail(&dc->list, wait_list);
-			__check_sit_bitmap(sbi, dc->start, dc->start + dc->len);
+	lstart = dc->lstart;
+	start = dc->start;
+	len = dc->len;
+	total_len = len;
 
-			f2fs_update_iostat(sbi, FS_DISCARD, 1);
+	dc->len = 0;
+
+	while (total_len && *issued < dpolicy->max_requests && !err) {
+		struct bio *bio = NULL;
+		unsigned long flags;
+		bool last = true;
+
+		if (len > max_discard_blocks) {
+			len = max_discard_blocks;
+			last = false;
 		}
-	} else {
-		__remove_discard_cmd(sbi, dc);
+
+		(*issued)++;
+		if (*issued == dpolicy->max_requests)
+			last = true;
+
+		dc->len += len;
+
+		if (time_to_inject(sbi, FAULT_DISCARD)) {
+			f2fs_show_injection_info(FAULT_DISCARD);
+			err = -EIO;
+			goto submit;
+		}
+		err = __blkdev_issue_discard(bdev,
+					SECTOR_FROM_BLOCK(start),
+					SECTOR_FROM_BLOCK(len),
+					GFP_NOFS, 0, &bio);
+submit:
+		if (err) {
+			spin_lock_irqsave(&dc->lock, flags);
+			if (dc->state == D_PARTIAL)
+				dc->state = D_SUBMIT;
+			spin_unlock_irqrestore(&dc->lock, flags);
+
+			break;
+		}
+
+		f2fs_bug_on(sbi, !bio);
+
+		/*
+		 * should keep before submission to avoid D_DONE
+		 * right away
+		 */
+		spin_lock_irqsave(&dc->lock, flags);
+		if (last)
+			dc->state = D_SUBMIT;
+		else
+			dc->state = D_PARTIAL;
+		dc->bio_ref++;
+		spin_unlock_irqrestore(&dc->lock, flags);
+
+		atomic_inc(&dcc->issing_discard);
+		dc->issuing++;
+		list_move_tail(&dc->list, wait_list);
+
+		/* sanity check on discard range */
+		__check_sit_bitmap(sbi, start, start + len);
+
+		bio->bi_private = dc;
+		bio->bi_end_io = f2fs_submit_discard_endio;
+		bio->bi_opf |= flag;
+		submit_bio(bio);
+
+		atomic_inc(&dcc->issued_discard);
+
+		f2fs_update_iostat(sbi, FS_DISCARD, 1);
+
+		lstart += len;
+		start += len;
+		total_len -= len;
+		len = total_len;
 	}
+
+	if (!err && len)
+		__update_discard_tree_range(sbi, bdev, lstart, start, len);
+	return err;
 }
 
 static struct discard_cmd *__insert_discard_tree(struct f2fs_sb_info *sbi,
@@ -1084,10 +1181,11 @@
 	struct discard_cmd *dc;
 	struct discard_info di = {0};
 	struct rb_node **insert_p = NULL, *insert_parent = NULL;
+	struct request_queue *q = bdev_get_queue(bdev);
+	unsigned int max_discard_blocks =
+			SECTOR_TO_BLOCK(q->limits.max_discard_sectors);
 	block_t end = lstart + len;
 
-	mutex_lock(&dcc->cmd_lock);
-
 	dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
 					NULL, lstart,
 					(struct rb_entry **)&prev_dc,
@@ -1127,7 +1225,8 @@
 
 		if (prev_dc && prev_dc->state == D_PREP &&
 			prev_dc->bdev == bdev &&
-			__is_discard_back_mergeable(&di, &prev_dc->di)) {
+			__is_discard_back_mergeable(&di, &prev_dc->di,
+							max_discard_blocks)) {
 			prev_dc->di.len += di.len;
 			dcc->undiscard_blks += di.len;
 			__relocate_discard_cmd(dcc, prev_dc);
@@ -1138,7 +1237,8 @@
 
 		if (next_dc && next_dc->state == D_PREP &&
 			next_dc->bdev == bdev &&
-			__is_discard_front_mergeable(&di, &next_dc->di)) {
+			__is_discard_front_mergeable(&di, &next_dc->di,
+							max_discard_blocks)) {
 			next_dc->di.lstart = di.lstart;
 			next_dc->di.len += di.len;
 			next_dc->di.start = di.start;
@@ -1161,8 +1261,6 @@
 		node = rb_next(&prev_dc->rb_node);
 		next_dc = rb_entry_safe(node, struct discard_cmd, rb_node);
 	}
-
-	mutex_unlock(&dcc->cmd_lock);
 }
 
 static int __queue_discard_cmd(struct f2fs_sb_info *sbi,
@@ -1177,10 +1275,72 @@
 
 		blkstart -= FDEV(devi).start_blk;
 	}
+	mutex_lock(&SM_I(sbi)->dcc_info->cmd_lock);
 	__update_discard_tree_range(sbi, bdev, lblkstart, blkstart, blklen);
+	mutex_unlock(&SM_I(sbi)->dcc_info->cmd_lock);
 	return 0;
 }
 
+static unsigned int __issue_discard_cmd_orderly(struct f2fs_sb_info *sbi,
+					struct discard_policy *dpolicy)
+{
+	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
+	struct rb_node **insert_p = NULL, *insert_parent = NULL;
+	struct discard_cmd *dc;
+	struct blk_plug plug;
+	unsigned int pos = dcc->next_pos;
+	unsigned int issued = 0;
+	bool io_interrupted = false;
+
+	mutex_lock(&dcc->cmd_lock);
+	dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
+					NULL, pos,
+					(struct rb_entry **)&prev_dc,
+					(struct rb_entry **)&next_dc,
+					&insert_p, &insert_parent, true);
+	if (!dc)
+		dc = next_dc;
+
+	blk_start_plug(&plug);
+
+	while (dc) {
+		struct rb_node *node;
+		int err = 0;
+
+		if (dc->state != D_PREP)
+			goto next;
+
+		if (dpolicy->io_aware && !is_idle(sbi)) {
+			io_interrupted = true;
+			break;
+		}
+
+		dcc->next_pos = dc->lstart + dc->len;
+		err = __submit_discard_cmd(sbi, dpolicy, dc, &issued);
+
+		if (issued >= dpolicy->max_requests)
+			break;
+next:
+		node = rb_next(&dc->rb_node);
+		if (err)
+			__remove_discard_cmd(sbi, dc);
+		dc = rb_entry_safe(node, struct discard_cmd, rb_node);
+	}
+
+	blk_finish_plug(&plug);
+
+	if (!dc)
+		dcc->next_pos = 0;
+
+	mutex_unlock(&dcc->cmd_lock);
+
+	if (!issued && io_interrupted)
+		issued = -1;
+
+	return issued;
+}
+
 static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
 					struct discard_policy *dpolicy)
 {
@@ -1188,19 +1348,24 @@
 	struct list_head *pend_list;
 	struct discard_cmd *dc, *tmp;
 	struct blk_plug plug;
-	int i, iter = 0, issued = 0;
+	int i, issued = 0;
 	bool io_interrupted = false;
 
 	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
 		if (i + 1 < dpolicy->granularity)
 			break;
+
+		if (i < DEFAULT_DISCARD_GRANULARITY && dpolicy->ordered)
+			return __issue_discard_cmd_orderly(sbi, dpolicy);
+
 		pend_list = &dcc->pend_list[i];
 
 		mutex_lock(&dcc->cmd_lock);
 		if (list_empty(pend_list))
 			goto next;
-		f2fs_bug_on(sbi,
-			!f2fs_check_rb_tree_consistence(sbi, &dcc->root));
+		if (unlikely(dcc->rbtree_check))
+			f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
+								&dcc->root));
 		blk_start_plug(&plug);
 		list_for_each_entry_safe(dc, tmp, pend_list, list) {
 			f2fs_bug_on(sbi, dc->state != D_PREP);
@@ -1208,20 +1373,19 @@
 			if (dpolicy->io_aware && i < dpolicy->io_aware_gran &&
 								!is_idle(sbi)) {
 				io_interrupted = true;
-				goto skip;
+				break;
 			}
 
-			__submit_discard_cmd(sbi, dpolicy, dc);
-			issued++;
-skip:
-			if (++iter >= dpolicy->max_requests)
+			__submit_discard_cmd(sbi, dpolicy, dc, &issued);
+
+			if (issued >= dpolicy->max_requests)
 				break;
 		}
 		blk_finish_plug(&plug);
 next:
 		mutex_unlock(&dcc->cmd_lock);
 
-		if (iter >= dpolicy->max_requests)
+		if (issued >= dpolicy->max_requests || io_interrupted)
 			break;
 	}
 
@@ -1319,21 +1483,22 @@
 	return trimmed;
 }
 
-static void __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
+static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
 						struct discard_policy *dpolicy)
 {
 	struct discard_policy dp;
+	unsigned int discard_blks;
 
-	if (dpolicy) {
-		__wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX);
-		return;
-	}
+	if (dpolicy)
+		return __wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX);
 
 	/* wait all */
 	__init_discard_policy(sbi, &dp, DPOLICY_FSTRIM, 1);
-	__wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
+	discard_blks = __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
 	__init_discard_policy(sbi, &dp, DPOLICY_UMOUNT, 1);
-	__wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
+	discard_blks += __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
+
+	return discard_blks;
 }
 
 /* This should be covered by global mutex, &sit_i->sentry_lock */
@@ -1386,6 +1551,8 @@
 
 	/* just to make sure there is no pending discard commands */
 	__wait_all_discard_cmd(sbi, NULL);
+
+	f2fs_bug_on(sbi, atomic_read(&dcc->discard_cmd_cnt));
 	return dropped;
 }
 
@@ -1649,21 +1816,30 @@
 	unsigned int start = 0, end = -1;
 	unsigned int secno, start_segno;
 	bool force = (cpc->reason & CP_DISCARD);
+	bool need_align = test_opt(sbi, LFS) && sbi->segs_per_sec > 1;
 
 	mutex_lock(&dirty_i->seglist_lock);
 
 	while (1) {
 		int i;
+
+		if (need_align && end != -1)
+			end--;
 		start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
 		if (start >= MAIN_SEGS(sbi))
 			break;
 		end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
 								start + 1);
 
-		for (i = start; i < end; i++)
-			clear_bit(i, prefree_map);
+		if (need_align) {
+			start = rounddown(start, sbi->segs_per_sec);
+			end = roundup(end, sbi->segs_per_sec);
+		}
 
-		dirty_i->nr_dirty[PRE] -= end - start;
+		for (i = start; i < end; i++) {
+			if (test_and_clear_bit(i, prefree_map))
+				dirty_i->nr_dirty[PRE]--;
+		}
 
 		if (!test_opt(sbi, DISCARD))
 			continue;
@@ -1757,7 +1933,9 @@
 	dcc->nr_discards = 0;
 	dcc->max_discards = MAIN_SEGS(sbi) << sbi->log_blocks_per_seg;
 	dcc->undiscard_blks = 0;
+	dcc->next_pos = 0;
 	dcc->root = RB_ROOT;
+	dcc->rbtree_check = false;
 
 	init_waitqueue_head(&dcc->discard_wait_queue);
 	SM_I(sbi)->dcc_info = dcc;
@@ -1907,6 +2085,8 @@
 	if (addr == NEW_ADDR)
 		return;
 
+	invalidate_mapping_pages(META_MAPPING(sbi), addr, addr);
+
 	/* add it into sit main buffer */
 	down_write(&sit_i->sentry_lock);
 
@@ -1925,7 +2105,7 @@
 	struct seg_entry *se;
 	bool is_cp = false;
 
-	if (!is_valid_blkaddr(blkaddr))
+	if (!is_valid_data_blkaddr(sbi, blkaddr))
 		return true;
 
 	down_read(&sit_i->sentry_lock);
@@ -1989,7 +2169,7 @@
  */
 struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
 {
-	return f2fs_get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno));
+	return f2fs_get_meta_page_nofail(sbi, GET_SUM_BLOCK(sbi, segno));
 }
 
 void f2fs_update_meta_page(struct f2fs_sb_info *sbi,
@@ -2372,7 +2552,7 @@
 	return has_candidate;
 }
 
-static void __issue_discard_cmd_range(struct f2fs_sb_info *sbi,
+static unsigned int __issue_discard_cmd_range(struct f2fs_sb_info *sbi,
 					struct discard_policy *dpolicy,
 					unsigned int start, unsigned int end)
 {
@@ -2382,12 +2562,15 @@
 	struct discard_cmd *dc;
 	struct blk_plug plug;
 	int issued;
+	unsigned int trimmed = 0;
 
 next:
 	issued = 0;
 
 	mutex_lock(&dcc->cmd_lock);
-	f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi, &dcc->root));
+	if (unlikely(dcc->rbtree_check))
+		f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
+								&dcc->root));
 
 	dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
 					NULL, start,
@@ -2401,6 +2584,7 @@
 
 	while (dc && dc->lstart <= end) {
 		struct rb_node *node;
+		int err = 0;
 
 		if (dc->len < dpolicy->granularity)
 			goto skip;
@@ -2410,19 +2594,24 @@
 			goto skip;
 		}
 
-		__submit_discard_cmd(sbi, dpolicy, dc);
+		err = __submit_discard_cmd(sbi, dpolicy, dc, &issued);
 
-		if (++issued >= dpolicy->max_requests) {
+		if (issued >= dpolicy->max_requests) {
 			start = dc->lstart + dc->len;
 
+			if (err)
+				__remove_discard_cmd(sbi, dc);
+
 			blk_finish_plug(&plug);
 			mutex_unlock(&dcc->cmd_lock);
-			__wait_all_discard_cmd(sbi, NULL);
+			trimmed += __wait_all_discard_cmd(sbi, NULL);
 			congestion_wait(BLK_RW_ASYNC, HZ/50);
 			goto next;
 		}
 skip:
 		node = rb_next(&dc->rb_node);
+		if (err)
+			__remove_discard_cmd(sbi, dc);
 		dc = rb_entry_safe(node, struct discard_cmd, rb_node);
 
 		if (fatal_signal_pending(current))
@@ -2431,6 +2620,8 @@
 
 	blk_finish_plug(&plug);
 	mutex_unlock(&dcc->cmd_lock);
+
+	return trimmed;
 }
 
 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
@@ -2443,12 +2634,13 @@
 	struct discard_policy dpolicy;
 	unsigned long long trimmed = 0;
 	int err = 0;
+	bool need_align = test_opt(sbi, LFS) && sbi->segs_per_sec > 1;
 
 	if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
 		return -EINVAL;
 
-	if (end <= MAIN_BLKADDR(sbi))
-		return -EINVAL;
+	if (end < MAIN_BLKADDR(sbi))
+		goto out;
 
 	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
 		f2fs_msg(sbi->sb, KERN_WARNING,
@@ -2460,6 +2652,10 @@
 	start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
 	end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
 						GET_SEGNO(sbi, end);
+	if (need_align) {
+		start_segno = rounddown(start_segno, sbi->segs_per_sec);
+		end_segno = roundup(end_segno + 1, sbi->segs_per_sec) - 1;
+	}
 
 	cpc.reason = CP_DISCARD;
 	cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));
@@ -2475,24 +2671,27 @@
 	if (err)
 		goto out;
 
-	start_block = START_BLOCK(sbi, start_segno);
-	end_block = START_BLOCK(sbi, end_segno + 1);
-
-	__init_discard_policy(sbi, &dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen);
-	__issue_discard_cmd_range(sbi, &dpolicy, start_block, end_block);
-
 	/*
 	 * We filed discard candidates, but actually we don't need to wait for
 	 * all of them, since they'll be issued in idle time along with runtime
 	 * discard option. User configuration looks like using runtime discard
 	 * or periodic fstrim instead of it.
 	 */
-	if (!test_opt(sbi, DISCARD)) {
-		trimmed = __wait_discard_cmd_range(sbi, &dpolicy,
+	if (test_opt(sbi, DISCARD))
+		goto out;
+
+	start_block = START_BLOCK(sbi, start_segno);
+	end_block = START_BLOCK(sbi, end_segno + 1);
+
+	__init_discard_policy(sbi, &dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen);
+	trimmed = __issue_discard_cmd_range(sbi, &dpolicy,
 					start_block, end_block);
-		range->len = F2FS_BLK_TO_BYTES(trimmed);
-	}
+
+	trimmed += __wait_discard_cmd_range(sbi, &dpolicy,
+					start_block, end_block);
 out:
+	if (!err)
+		range->len = F2FS_BLK_TO_BYTES(trimmed);
 	return err;
 }
 
@@ -2645,8 +2844,8 @@
 			return CURSEG_COLD_DATA;
 		if (file_is_hot(inode) ||
 				is_inode_flag_set(inode, FI_HOT_DATA) ||
-				is_inode_flag_set(inode, FI_ATOMIC_FILE) ||
-				is_inode_flag_set(inode, FI_VOLATILE_FILE))
+				f2fs_is_atomic_file(inode) ||
+				f2fs_is_volatile_file(inode))
 			return CURSEG_HOT_DATA;
 		/* f2fs_rw_hint_to_seg_type(inode->i_write_hint); */
 		return CURSEG_WARM_DATA;
@@ -2788,6 +2987,9 @@
 reallocate:
 	f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
 			&fio->new_blkaddr, sum, type, fio, true);
+	if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO)
+		invalidate_mapping_pages(META_MAPPING(fio->sbi),
+					fio->old_blkaddr, fio->old_blkaddr);
 
 	/* writeout dirty page into bdev */
 	f2fs_submit_page_write(fio);
@@ -2843,11 +3045,9 @@
 {
 	struct f2fs_sb_info *sbi = fio->sbi;
 	struct f2fs_summary sum;
-	struct node_info ni;
 
 	f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
-	f2fs_get_node_info(sbi, dn->nid, &ni);
-	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
+	set_summary(&sum, dn->nid, dn->ofs_in_node, fio->version);
 	do_write_page(&sum, fio);
 	f2fs_update_data_blkaddr(dn, fio->new_blkaddr);
 
@@ -2944,8 +3144,11 @@
 
 	if (!recover_curseg || recover_newaddr)
 		update_sit_entry(sbi, new_blkaddr, 1);
-	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
+	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
+		invalidate_mapping_pages(META_MAPPING(sbi),
+					old_blkaddr, old_blkaddr);
 		update_sit_entry(sbi, old_blkaddr, -1);
+	}
 
 	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
 	locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr));
@@ -2999,7 +3202,7 @@
 {
 	struct page *cpage;
 
-	if (!is_valid_blkaddr(blkaddr))
+	if (!is_valid_data_blkaddr(sbi, blkaddr))
 		return;
 
 	cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
@@ -3009,7 +3212,7 @@
 	}
 }
 
-static void read_compacted_summaries(struct f2fs_sb_info *sbi)
+static int read_compacted_summaries(struct f2fs_sb_info *sbi)
 {
 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
 	struct curseg_info *seg_i;
@@ -3021,6 +3224,8 @@
 	start = start_sum_block(sbi);
 
 	page = f2fs_get_meta_page(sbi, start++);
+	if (IS_ERR(page))
+		return PTR_ERR(page);
 	kaddr = (unsigned char *)page_address(page);
 
 	/* Step 1: restore nat cache */
@@ -3061,11 +3266,14 @@
 			page = NULL;
 
 			page = f2fs_get_meta_page(sbi, start++);
+			if (IS_ERR(page))
+				return PTR_ERR(page);
 			kaddr = (unsigned char *)page_address(page);
 			offset = 0;
 		}
 	}
 	f2fs_put_page(page, 1);
+	return 0;
 }
 
 static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
@@ -3077,6 +3285,7 @@
 	unsigned short blk_off;
 	unsigned int segno = 0;
 	block_t blk_addr = 0;
+	int err = 0;
 
 	/* get segment number and block addr */
 	if (IS_DATASEG(type)) {
@@ -3100,6 +3309,8 @@
 	}
 
 	new = f2fs_get_meta_page(sbi, blk_addr);
+	if (IS_ERR(new))
+		return PTR_ERR(new);
 	sum = (struct f2fs_summary_block *)page_address(new);
 
 	if (IS_NODESEG(type)) {
@@ -3111,7 +3322,9 @@
 				ns->ofs_in_node = 0;
 			}
 		} else {
-			f2fs_restore_node_summary(sbi, segno, sum);
+			err = f2fs_restore_node_summary(sbi, segno, sum);
+			if (err)
+				goto out;
 		}
 	}
 
@@ -3131,8 +3344,9 @@
 	curseg->alloc_type = ckpt->alloc_type[type];
 	curseg->next_blkoff = blk_off;
 	mutex_unlock(&curseg->curseg_mutex);
+out:
 	f2fs_put_page(new, 1);
-	return 0;
+	return err;
 }
 
 static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
@@ -3150,7 +3364,9 @@
 							META_CP, true);
 
 		/* restore for compacted data summary */
-		read_compacted_summaries(sbi);
+		err = read_compacted_summaries(sbi);
+		if (err)
+			return err;
 		type = CURSEG_HOT_NODE;
 	}
 
@@ -3281,7 +3497,7 @@
 static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
 					unsigned int segno)
 {
-	return f2fs_get_meta_page(sbi, current_sit_addr(sbi, segno));
+	return f2fs_get_meta_page_nofail(sbi, current_sit_addr(sbi, segno));
 }
 
 static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index 38c549d..b3d9e31 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -85,7 +85,7 @@
 	(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & ((sbi)->blocks_per_seg - 1))
 
 #define GET_SEGNO(sbi, blk_addr)					\
-	((!is_valid_blkaddr(blk_addr)) ?			\
+	((!is_valid_data_blkaddr(sbi, blk_addr)) ?			\
 	NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi),			\
 		GET_SEGNO_FROM_SEG0(sbi, blk_addr)))
 #define BLKS_PER_SEC(sbi)					\
@@ -215,7 +215,7 @@
 #define IS_DUMMY_WRITTEN_PAGE(page)			\
 		(page_private(page) == (unsigned long)DUMMY_WRITTEN_PAGE)
 
-#define MAX_SKIP_ATOMIC_COUNT			16
+#define MAX_SKIP_GC_COUNT			16
 
 struct inmem_pages {
 	struct list_head list;
@@ -648,13 +648,10 @@
 {
 	struct f2fs_sb_info *sbi = fio->sbi;
 
-	if (PAGE_TYPE_OF_BIO(fio->type) == META &&
-				(!is_read_io(fio->op) || fio->is_meta))
-		BUG_ON(blk_addr < SEG0_BLKADDR(sbi) ||
-				blk_addr >= MAIN_BLKADDR(sbi));
+	if (__is_meta_io(fio))
+		verify_blkaddr(sbi, blk_addr, META_GENERIC);
 	else
-		BUG_ON(blk_addr < MAIN_BLKADDR(sbi) ||
-				blk_addr >= MAX_BLKADDR(sbi));
+		verify_blkaddr(sbi, blk_addr, DATA_GENERIC);
 }
 
 /*
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 343a5cf..bba0cd4 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -41,7 +41,7 @@
 
 #ifdef CONFIG_F2FS_FAULT_INJECTION
 
-char *fault_name[FAULT_MAX] = {
+char *f2fs_fault_name[FAULT_MAX] = {
 	[FAULT_KMALLOC]		= "kmalloc",
 	[FAULT_KVMALLOC]	= "kvmalloc",
 	[FAULT_PAGE_ALLOC]	= "page alloc",
@@ -55,20 +55,24 @@
 	[FAULT_TRUNCATE]	= "truncate fail",
 	[FAULT_IO]		= "IO error",
 	[FAULT_CHECKPOINT]	= "checkpoint error",
+	[FAULT_DISCARD]		= "discard error",
 };
 
-static void f2fs_build_fault_attr(struct f2fs_sb_info *sbi,
-						unsigned int rate)
+void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
+							unsigned int type)
 {
 	struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;
 
 	if (rate) {
 		atomic_set(&ffi->inject_ops, 0);
 		ffi->inject_rate = rate;
-		ffi->inject_type = (1 << FAULT_MAX) - 1;
-	} else {
-		memset(ffi, 0, sizeof(struct f2fs_fault_info));
 	}
+
+	if (type)
+		ffi->inject_type = type;
+
+	if (!rate && !type)
+		memset(ffi, 0, sizeof(struct f2fs_fault_info));
 }
 #endif
 
@@ -113,6 +117,7 @@
 	Opt_mode,
 	Opt_io_size_bits,
 	Opt_fault_injection,
+	Opt_fault_type,
 	Opt_lazytime,
 	Opt_nolazytime,
 	Opt_quota,
@@ -170,6 +175,7 @@
 	{Opt_mode, "mode=%s"},
 	{Opt_io_size_bits, "io_bits=%u"},
 	{Opt_fault_injection, "fault_injection=%u"},
+	{Opt_fault_type, "fault_type=%u"},
 	{Opt_lazytime, "lazytime"},
 	{Opt_nolazytime, "nolazytime"},
 	{Opt_quota, "quota"},
@@ -347,12 +353,6 @@
 			"QUOTA feature is enabled, so ignore jquota_fmt");
 		F2FS_OPTION(sbi).s_jquota_fmt = 0;
 	}
-	if (f2fs_sb_has_quota_ino(sbi->sb) && f2fs_readonly(sbi->sb)) {
-		f2fs_msg(sbi->sb, KERN_INFO,
-			 "Filesystem with quota feature cannot be mounted RDWR "
-			 "without CONFIG_QUOTA");
-		return -1;
-	}
 	return 0;
 }
 #endif
@@ -606,7 +606,18 @@
 			if (args->from && match_int(args, &arg))
 				return -EINVAL;
 #ifdef CONFIG_F2FS_FAULT_INJECTION
-			f2fs_build_fault_attr(sbi, arg);
+			f2fs_build_fault_attr(sbi, arg, F2FS_ALL_FAULT_TYPE);
+			set_opt(sbi, FAULT_INJECTION);
+#else
+			f2fs_msg(sb, KERN_INFO,
+				"FAULT_INJECTION was not selected");
+#endif
+			break;
+		case Opt_fault_type:
+			if (args->from && match_int(args, &arg))
+				return -EINVAL;
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+			f2fs_build_fault_attr(sbi, 0, arg);
 			set_opt(sbi, FAULT_INJECTION);
 #else
 			f2fs_msg(sb, KERN_INFO,
@@ -775,6 +786,19 @@
 #ifdef CONFIG_QUOTA
 	if (f2fs_check_quota_options(sbi))
 		return -EINVAL;
+#else
+	if (f2fs_sb_has_quota_ino(sbi->sb) && !f2fs_readonly(sbi->sb)) {
+		f2fs_msg(sbi->sb, KERN_INFO,
+			 "Filesystem with quota feature cannot be mounted RDWR "
+			 "without CONFIG_QUOTA");
+		return -EINVAL;
+	}
+	if (f2fs_sb_has_project_quota(sbi->sb) && !f2fs_readonly(sbi->sb)) {
+		f2fs_msg(sb, KERN_ERR,
+			"Filesystem with project quota feature cannot be "
+			"mounted RDWR without CONFIG_QUOTA");
+		return -EINVAL;
+	}
 #endif
 
 	if (F2FS_IO_SIZE_BITS(sbi) && !test_opt(sbi, LFS)) {
@@ -1030,6 +1054,10 @@
 	/* our cp_error case, we can wait for any writeback page */
 	f2fs_flush_merged_writes(sbi);
 
+	f2fs_wait_on_all_pages_writeback(sbi);
+
+	f2fs_bug_on(sbi, sbi->fsync_node_num);
+
 	iput(sbi->node_inode);
 	iput(sbi->meta_inode);
 
@@ -1310,9 +1338,12 @@
 	if (F2FS_IO_SIZE_BITS(sbi))
 		seq_printf(seq, ",io_size=%uKB", F2FS_IO_SIZE_KB(sbi));
 #ifdef CONFIG_F2FS_FAULT_INJECTION
-	if (test_opt(sbi, FAULT_INJECTION))
+	if (test_opt(sbi, FAULT_INJECTION)) {
 		seq_printf(seq, ",fault_injection=%u",
 				F2FS_OPTION(sbi).fault_info.inject_rate);
+		seq_printf(seq, ",fault_type=%u",
+				F2FS_OPTION(sbi).fault_info.inject_type);
+	}
 #endif
 #ifdef CONFIG_QUOTA
 	if (test_opt(sbi, QUOTA))
@@ -1357,7 +1388,8 @@
 	F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
 	F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
 	F2FS_OPTION(sbi).test_dummy_encryption = false;
-	sbi->readdir_ra = 1;
+	F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
+	F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);
 
 	set_opt(sbi, BG_GC);
 	set_opt(sbi, INLINE_XATTR);
@@ -1367,12 +1399,12 @@
 	set_opt(sbi, NOHEAP);
 	sbi->sb->s_flags |= MS_LAZYTIME;
 	set_opt(sbi, FLUSH_MERGE);
-	if (f2fs_sb_has_blkzoned(sbi->sb)) {
-		set_opt_mode(sbi, F2FS_MOUNT_LFS);
+	if (blk_queue_discard(bdev_get_queue(sbi->sb->s_bdev)))
 		set_opt(sbi, DISCARD);
-	} else {
+	if (f2fs_sb_has_blkzoned(sbi->sb))
+		set_opt_mode(sbi, F2FS_MOUNT_LFS);
+	else
 		set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
-	}
 
 #ifdef CONFIG_F2FS_FS_XATTR
 	set_opt(sbi, XATTR_USER);
@@ -1381,9 +1413,7 @@
 	set_opt(sbi, POSIX_ACL);
 #endif
 
-#ifdef CONFIG_F2FS_FAULT_INJECTION
-	f2fs_build_fault_attr(sbi, 0);
-#endif
+	f2fs_build_fault_attr(sbi, 0, 0);
 }
 
 #ifdef CONFIG_QUOTA
@@ -2231,9 +2261,9 @@
 		return 1;
 	}
 
-	if (secs_per_zone > total_sections) {
+	if (secs_per_zone > total_sections || !secs_per_zone) {
 		f2fs_msg(sb, KERN_INFO,
-			"Wrong secs_per_zone (%u > %u)",
+			"Wrong secs_per_zone / total_sections (%u, %u)",
 			secs_per_zone, total_sections);
 		return 1;
 	}
@@ -2287,6 +2317,9 @@
 	unsigned int sit_segs, nat_segs;
 	unsigned int sit_bitmap_size, nat_bitmap_size;
 	unsigned int log_blocks_per_seg;
+	unsigned int segment_count_main;
+	unsigned int cp_pack_start_sum, cp_payload;
+	block_t user_block_count;
 	int i;
 
 	total = le32_to_cpu(raw_super->segment_count);
@@ -2311,6 +2344,16 @@
 		return 1;
 	}
 
+	user_block_count = le64_to_cpu(ckpt->user_block_count);
+	segment_count_main = le32_to_cpu(raw_super->segment_count_main);
+	log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
+	if (!user_block_count || user_block_count >=
+			segment_count_main << log_blocks_per_seg) {
+		f2fs_msg(sbi->sb, KERN_ERR,
+			"Wrong user_block_count: %u", user_block_count);
+		return 1;
+	}
+
 	main_segs = le32_to_cpu(raw_super->segment_count_main);
 	blocks_per_seg = sbi->blocks_per_seg;
 
@@ -2327,7 +2370,6 @@
 
 	sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
 	nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
-	log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
 
 	if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
 		nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
@@ -2337,6 +2379,17 @@
 		return 1;
 	}
 
+	cp_pack_start_sum = __start_sum_addr(sbi);
+	cp_payload = __cp_payload(sbi);
+	if (cp_pack_start_sum < cp_payload + 1 ||
+		cp_pack_start_sum > blocks_per_seg - 1 -
+			NR_CURSEG_TYPE) {
+		f2fs_msg(sbi->sb, KERN_ERR,
+			"Wrong cp_pack_start_sum: %u",
+			cp_pack_start_sum);
+		return 1;
+	}
+
 	if (unlikely(f2fs_cp_error(sbi))) {
 		f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
 		return 1;
@@ -2674,6 +2727,8 @@
 		sm_i->dcc_info->discard_granularity = 1;
 		sm_i->ipu_policy = 1 << F2FS_IPU_FORCE;
 	}
+
+	sbi->readdir_ra = 1;
 }
 
 static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
@@ -2723,9 +2778,6 @@
 	sb->s_fs_info = sbi;
 	sbi->raw_super = raw_super;
 
-	F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
-	F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);
-
 	/* precompute checksum seed for metadata */
 	if (f2fs_sb_has_inode_chksum(sb))
 		sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
@@ -2889,6 +2941,8 @@
 
 	f2fs_init_ino_entry_info(sbi);
 
+	f2fs_init_fsync_node_info(sbi);
+
 	/* setup f2fs internal modules */
 	err = f2fs_build_segment_manager(sbi);
 	if (err) {
@@ -2935,10 +2989,11 @@
 		err = PTR_ERR(root);
 		goto free_stats;
 	}
-	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
+	if (!S_ISDIR(root->i_mode) || !root->i_blocks ||
+			!root->i_size || !root->i_nlink) {
 		iput(root);
 		err = -EINVAL;
-		goto free_node_inode;
+		goto free_stats;
 	}
 
 	sb->s_root = d_make_root(root); /* allocate root dentry */
@@ -2952,10 +3007,7 @@
 		goto free_root_inode;
 
 #ifdef CONFIG_QUOTA
-	/*
-	 * Turn on quotas which were not enabled for read-only mounts if
-	 * filesystem has quota feature, so that they are updated correctly.
-	 */
+	/* Enable quota usage during mount */
 	if (f2fs_sb_has_quota_ino(sb) && !f2fs_readonly(sb)) {
 		err = f2fs_enable_quotas(sb);
 		if (err) {
@@ -3113,9 +3165,19 @@
 static void kill_f2fs_super(struct super_block *sb)
 {
 	if (sb->s_root) {
-		set_sbi_flag(F2FS_SB(sb), SBI_IS_CLOSE);
-		f2fs_stop_gc_thread(F2FS_SB(sb));
-		f2fs_stop_discard_thread(F2FS_SB(sb));
+		struct f2fs_sb_info *sbi = F2FS_SB(sb);
+
+		set_sbi_flag(sbi, SBI_IS_CLOSE);
+		f2fs_stop_gc_thread(sbi);
+		f2fs_stop_discard_thread(sbi);
+
+		if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
+				!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
+			struct cp_control cpc = {
+				.reason = CP_UMOUNT,
+			};
+			f2fs_write_checkpoint(sbi, &cpc);
+		}
 	}
 	kill_block_super(sb);
 }
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index e1b1b31..30fd016 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -9,6 +9,7 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+#include <linux/compiler.h>
 #include <linux/proc_fs.h>
 #include <linux/f2fs_fs.h>
 #include <linux/seq_file.h>
@@ -252,6 +253,7 @@
 		if (t >= 1) {
 			sbi->gc_mode = GC_URGENT;
 			if (sbi->gc_thread) {
+				sbi->gc_thread->gc_wake = 1;
 				wake_up_interruptible_all(
 					&sbi->gc_thread->gc_wait_queue_head);
 				wake_up_discard_thread(sbi, true);
@@ -286,8 +288,10 @@
 	bool gc_entry = (!strcmp(a->attr.name, "gc_urgent") ||
 					a->struct_type == GC_THREAD);
 
-	if (gc_entry)
-		down_read(&sbi->sb->s_umount);
+	if (gc_entry) {
+		if (!down_read_trylock(&sbi->sb->s_umount))
+			return -EAGAIN;
+	}
 	ret = __sbi_store(a, sbi, buf, count);
 	if (gc_entry)
 		up_read(&sbi->sb->s_umount);
@@ -518,7 +522,8 @@
 	.kset	= &f2fs_kset,
 };
 
-static int segment_info_seq_show(struct seq_file *seq, void *offset)
+static int __maybe_unused segment_info_seq_show(struct seq_file *seq,
+						void *offset)
 {
 	struct super_block *sb = seq->private;
 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
@@ -545,7 +550,8 @@
 	return 0;
 }
 
-static int segment_bits_seq_show(struct seq_file *seq, void *offset)
+static int __maybe_unused segment_bits_seq_show(struct seq_file *seq,
+						void *offset)
 {
 	struct super_block *sb = seq->private;
 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
@@ -569,7 +575,8 @@
 	return 0;
 }
 
-static int iostat_info_seq_show(struct seq_file *seq, void *offset)
+static int __maybe_unused iostat_info_seq_show(struct seq_file *seq,
+					       void *offset)
 {
 	struct super_block *sb = seq->private;
 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
@@ -611,6 +618,28 @@
 	return 0;
 }
 
+static int __maybe_unused victim_bits_seq_show(struct seq_file *seq,
+						void *offset)
+{
+	struct super_block *sb = seq->private;
+	struct f2fs_sb_info *sbi = F2FS_SB(sb);
+	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
+	int i;
+
+	seq_puts(seq, "format: victim_secmap bitmaps\n");
+
+	for (i = 0; i < MAIN_SECS(sbi); i++) {
+		if ((i % 10) == 0)
+			seq_printf(seq, "%-10d", i);
+		seq_printf(seq, "%d", test_bit(i, dirty_i->victim_secmap) ? 1 : 0);
+		if ((i % 10) == 9 || i == (MAIN_SECS(sbi) - 1))
+			seq_putc(seq, '\n');
+		else
+			seq_putc(seq, ' ');
+	}
+	return 0;
+}
+
 #define F2FS_PROC_FILE_DEF(_name)					\
 static int _name##_open_fs(struct inode *inode, struct file *file)	\
 {									\
@@ -627,6 +656,7 @@
 F2FS_PROC_FILE_DEF(segment_info);
 F2FS_PROC_FILE_DEF(segment_bits);
 F2FS_PROC_FILE_DEF(iostat_info);
+F2FS_PROC_FILE_DEF(victim_bits);
 
 int __init f2fs_init_sysfs(void)
 {
@@ -677,6 +707,8 @@
 				 &f2fs_seq_segment_bits_fops, sb);
 		proc_create_data("iostat_info", S_IRUGO, sbi->s_proc,
 				&f2fs_seq_iostat_info_fops, sb);
+		proc_create_data("victim_bits", S_IRUGO, sbi->s_proc,
+				&f2fs_seq_victim_bits_fops, sb);
 	}
 	return 0;
 }
@@ -687,6 +719,7 @@
 		remove_proc_entry("iostat_info", sbi->s_proc);
 		remove_proc_entry("segment_info", sbi->s_proc);
 		remove_proc_entry("segment_bits", sbi->s_proc);
+		remove_proc_entry("victim_bits", sbi->s_proc);
 		remove_proc_entry(sbi->sb->s_id, f2fs_proc_root);
 	}
 	kobject_del(&sbi->s_kobj);
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index 7082718..77a010e 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -37,9 +37,6 @@
 			return -EOPNOTSUPP;
 		break;
 	case F2FS_XATTR_INDEX_TRUSTED:
-		if (!capable(CAP_SYS_ADMIN))
-			return -EPERM;
-		break;
 	case F2FS_XATTR_INDEX_SECURITY:
 		break;
 	default:
@@ -62,9 +59,6 @@
 			return -EOPNOTSUPP;
 		break;
 	case F2FS_XATTR_INDEX_TRUSTED:
-		if (!capable(CAP_SYS_ADMIN))
-			return -EPERM;
-		break;
 	case F2FS_XATTR_INDEX_SECURITY:
 		break;
 	default:
@@ -100,12 +94,22 @@
 		const char *name, const void *value,
 		size_t size, int flags)
 {
+	unsigned char old_advise = F2FS_I(inode)->i_advise;
+	unsigned char new_advise;
+
 	if (!inode_owner_or_capable(inode))
 		return -EPERM;
 	if (value == NULL)
 		return -EINVAL;
 
-	F2FS_I(inode)->i_advise |= *(char *)value;
+	new_advise = *(char *)value;
+	if (new_advise & ~FADVISE_MODIFIABLE_BITS)
+		return -EINVAL;
+
+	new_advise = new_advise & FADVISE_MODIFIABLE_BITS;
+	new_advise |= old_advise & ~FADVISE_MODIFIABLE_BITS;
+
+	F2FS_I(inode)->i_advise = new_advise;
 	f2fs_mark_inode_dirty_sync(inode, true);
 	return 0;
 }
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index eef0caf..e949551 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1725,6 +1725,7 @@
 	if (status) {
 		op = &args->ops[0];
 		op->status = status;
+		resp->opcnt = 1;
 		goto encode_op;
 	}
 
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
index 8f040f8..25c8b32 100644
--- a/fs/ocfs2/buffer_head_io.c
+++ b/fs/ocfs2/buffer_head_io.c
@@ -341,6 +341,7 @@
 				 * for this bh as it's not marked locally
 				 * uptodate. */
 				status = -EIO;
+				clear_buffer_needs_validate(bh);
 				put_bh(bh);
 				bhs[i] = NULL;
 				continue;
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 3f828a1..0cc30a5 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -589,9 +589,9 @@
 
 	res->last_used = 0;
 
-	spin_lock(&dlm->spinlock);
+	spin_lock(&dlm->track_lock);
 	list_add_tail(&res->tracking, &dlm->tracking_list);
-	spin_unlock(&dlm->spinlock);
+	spin_unlock(&dlm->track_lock);
 
 	memset(res->lvb, 0, DLM_LVB_LEN);
 	memset(res->refmap, 0, sizeof(res->refmap));
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 0f23b3b..37e04a0 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -455,6 +455,20 @@
 	int err;
 	int i;
 
+	/*
+	 * The ability to racily run the kernel stack unwinder on a running task
+	 * and then observe the unwinder output is scary; while it is useful for
+	 * debugging kernel issues, it can also allow an attacker to leak kernel
+	 * stack contents.
+	 * Doing this in a manner that is at least safe from races would require
+	 * some work to ensure that the remote task can not be scheduled; and
+	 * even then, this would still expose the unwinder as local attack
+	 * surface.
+	 * Therefore, this interface is restricted to root.
+	 */
+	if (!file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN))
+		return -EACCES;
+
 	entries = kmalloc(MAX_STACK_TRACE_DEPTH * sizeof(*entries), GFP_KERNEL);
 	if (!entries)
 		return -ENOMEM;
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index c2afe39..4298a39 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -457,17 +457,12 @@
 	return inode;
 }
 
-int proc_fill_super(struct super_block *s, void *data, int silent)
+int proc_fill_super(struct super_block *s)
 {
-	struct pid_namespace *ns = get_pid_ns(s->s_fs_info);
 	struct inode *root_inode;
 	int ret;
 
-	if (!proc_parse_options(data, ns))
-		return -EINVAL;
-
-	/* User space would break if executables or devices appear on proc */
-	s->s_iflags |= SB_I_USERNS_VISIBLE | SB_I_NOEXEC | SB_I_NODEV;
+	s->s_iflags |= SB_I_USERNS_VISIBLE | SB_I_NODEV;
 	s->s_flags |= MS_NODIRATIME | MS_NOSUID | MS_NOEXEC;
 	s->s_blocksize = 1024;
 	s->s_blocksize_bits = 10;
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 03d8d14..d960512 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -214,7 +214,7 @@
 
 extern void proc_init_inodecache(void);
 extern struct inode *proc_get_inode(struct super_block *, struct proc_dir_entry *);
-extern int proc_fill_super(struct super_block *, void *data, int flags);
+extern int proc_fill_super(struct super_block *);
 extern void proc_entry_rundown(struct proc_dir_entry *);
 
 /*
@@ -281,7 +281,6 @@
  * root.c
  */
 extern struct proc_dir_entry proc_root;
-extern int proc_parse_options(char *options, struct pid_namespace *pid);
 
 extern void proc_self_init(void);
 extern int proc_remount(struct super_block *, int *, char *);
diff --git a/fs/proc/root.c b/fs/proc/root.c
index c2f5014..1d68fcd 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -23,6 +23,21 @@
 
 #include "internal.h"
 
+static int proc_test_super(struct super_block *sb, void *data)
+{
+	return sb->s_fs_info == data;
+}
+
+static int proc_set_super(struct super_block *sb, void *data)
+{
+	int err = set_anon_super(sb, NULL);
+	if (!err) {
+		struct pid_namespace *ns = (struct pid_namespace *)data;
+		sb->s_fs_info = get_pid_ns(ns);
+	}
+	return err;
+}
+
 enum {
 	Opt_gid, Opt_hidepid, Opt_err,
 };
@@ -33,7 +48,7 @@
 	{Opt_err, NULL},
 };
 
-int proc_parse_options(char *options, struct pid_namespace *pid)
+static int proc_parse_options(char *options, struct pid_namespace *pid)
 {
 	char *p;
 	substring_t args[MAX_OPT_ARGS];
@@ -85,16 +100,45 @@
 static struct dentry *proc_mount(struct file_system_type *fs_type,
 	int flags, const char *dev_name, void *data)
 {
+	int err;
+	struct super_block *sb;
 	struct pid_namespace *ns;
+	char *options;
 
 	if (flags & MS_KERNMOUNT) {
-		ns = data;
-		data = NULL;
+		ns = (struct pid_namespace *)data;
+		options = NULL;
 	} else {
 		ns = task_active_pid_ns(current);
+		options = data;
+
+		/* Does the mounter have privilege over the pid namespace? */
+		if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN))
+			return ERR_PTR(-EPERM);
 	}
 
-	return mount_ns(fs_type, flags, data, ns, ns->user_ns, proc_fill_super);
+	sb = sget(fs_type, proc_test_super, proc_set_super, flags, ns);
+	if (IS_ERR(sb))
+		return ERR_CAST(sb);
+
+	if (!proc_parse_options(options, ns)) {
+		deactivate_locked_super(sb);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (!sb->s_root) {
+		err = proc_fill_super(sb);
+		if (err) {
+			deactivate_locked_super(sb);
+			return ERR_PTR(err);
+		}
+
+		sb->s_flags |= MS_ACTIVE;
+		/* User space would break if executables appear on proc */
+		sb->s_iflags |= SB_I_NOEXEC;
+	}
+
+	return dget(sb->s_root);
 }
 
 static void proc_kill_sb(struct super_block *sb)
diff --git a/fs/sdcardfs/file.c b/fs/sdcardfs/file.c
index 1461254..271c4c4 100644
--- a/fs/sdcardfs/file.c
+++ b/fs/sdcardfs/file.c
@@ -118,7 +118,11 @@
 		goto out;
 
 	/* save current_cred and override it */
-	OVERRIDE_CRED(sbi, saved_cred, SDCARDFS_I(file_inode(file)));
+	saved_cred = override_fsids(sbi, SDCARDFS_I(file_inode(file))->data);
+	if (!saved_cred) {
+		err = -ENOMEM;
+		goto out;
+	}
 
 	if (lower_file->f_op->unlocked_ioctl)
 		err = lower_file->f_op->unlocked_ioctl(lower_file, cmd, arg);
@@ -127,7 +131,7 @@
 	if (!err)
 		sdcardfs_copy_and_fix_attrs(file_inode(file),
 				      file_inode(lower_file));
-	REVERT_CRED(saved_cred);
+	revert_fsids(saved_cred);
 out:
 	return err;
 }
@@ -149,12 +153,16 @@
 		goto out;
 
 	/* save current_cred and override it */
-	OVERRIDE_CRED(sbi, saved_cred, SDCARDFS_I(file_inode(file)));
+	saved_cred = override_fsids(sbi, SDCARDFS_I(file_inode(file))->data);
+	if (!saved_cred) {
+		err = -ENOMEM;
+		goto out;
+	}
 
 	if (lower_file->f_op->compat_ioctl)
 		err = lower_file->f_op->compat_ioctl(lower_file, cmd, arg);
 
-	REVERT_CRED(saved_cred);
+	revert_fsids(saved_cred);
 out:
 	return err;
 }
@@ -241,7 +249,11 @@
 	}
 
 	/* save current_cred and override it */
-	OVERRIDE_CRED(sbi, saved_cred, SDCARDFS_I(inode));
+	saved_cred = override_fsids(sbi, SDCARDFS_I(inode)->data);
+	if (!saved_cred) {
+		err = -ENOMEM;
+		goto out_err;
+	}
 
 	file->private_data =
 		kzalloc(sizeof(struct sdcardfs_file_info), GFP_KERNEL);
@@ -271,7 +283,7 @@
 		sdcardfs_copy_and_fix_attrs(inode, sdcardfs_lower_inode(inode));
 
 out_revert_cred:
-	REVERT_CRED(saved_cred);
+	revert_fsids(saved_cred);
 out_err:
 	dput(parent);
 	return err;
diff --git a/fs/sdcardfs/inode.c b/fs/sdcardfs/inode.c
index 30d4db2..7c08ffe 100644
--- a/fs/sdcardfs/inode.c
+++ b/fs/sdcardfs/inode.c
@@ -22,7 +22,6 @@
 #include <linux/fs_struct.h>
 #include <linux/ratelimit.h>
 
-/* Do not directly use this function. Use OVERRIDE_CRED() instead. */
 const struct cred *override_fsids(struct sdcardfs_sb_info *sbi,
 		struct sdcardfs_inode_data *data)
 {
@@ -50,7 +49,6 @@
 	return old_cred;
 }
 
-/* Do not directly use this function, use REVERT_CRED() instead. */
 void revert_fsids(const struct cred *old_cred)
 {
 	const struct cred *cur_cred;
@@ -78,7 +76,10 @@
 	}
 
 	/* save current_cred and override it */
-	OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));
+	saved_cred = override_fsids(SDCARDFS_SB(dir->i_sb),
+					SDCARDFS_I(dir)->data);
+	if (!saved_cred)
+		return -ENOMEM;
 
 	sdcardfs_get_lower_path(dentry, &lower_path);
 	lower_dentry = lower_path.dentry;
@@ -95,8 +96,11 @@
 		err = -ENOMEM;
 		goto out_unlock;
 	}
+	copied_fs->umask = 0;
+	task_lock(current);
 	current->fs = copied_fs;
-	current->fs->umask = 0;
+	task_unlock(current);
+
 	err = vfs_create2(lower_dentry_mnt, d_inode(lower_parent_dentry), lower_dentry, mode, want_excl);
 	if (err)
 		goto out;
@@ -110,58 +114,18 @@
 	fixup_lower_ownership(dentry, dentry->d_name.name);
 
 out:
+	task_lock(current);
 	current->fs = saved_fs;
+	task_unlock(current);
 	free_fs_struct(copied_fs);
 out_unlock:
 	unlock_dir(lower_parent_dentry);
 	sdcardfs_put_lower_path(dentry, &lower_path);
-	REVERT_CRED(saved_cred);
+	revert_fsids(saved_cred);
 out_eacces:
 	return err;
 }
 
-#if 0
-static int sdcardfs_link(struct dentry *old_dentry, struct inode *dir,
-		       struct dentry *new_dentry)
-{
-	struct dentry *lower_old_dentry;
-	struct dentry *lower_new_dentry;
-	struct dentry *lower_dir_dentry;
-	u64 file_size_save;
-	int err;
-	struct path lower_old_path, lower_new_path;
-
-	OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb));
-
-	file_size_save = i_size_read(d_inode(old_dentry));
-	sdcardfs_get_lower_path(old_dentry, &lower_old_path);
-	sdcardfs_get_lower_path(new_dentry, &lower_new_path);
-	lower_old_dentry = lower_old_path.dentry;
-	lower_new_dentry = lower_new_path.dentry;
-	lower_dir_dentry = lock_parent(lower_new_dentry);
-
-	err = vfs_link(lower_old_dentry, d_inode(lower_dir_dentry),
-		       lower_new_dentry, NULL);
-	if (err || !d_inode(lower_new_dentry))
-		goto out;
-
-	err = sdcardfs_interpose(new_dentry, dir->i_sb, &lower_new_path);
-	if (err)
-		goto out;
-	fsstack_copy_attr_times(dir, d_inode(lower_new_dentry));
-	fsstack_copy_inode_size(dir, d_inode(lower_new_dentry));
-	set_nlink(d_inode(old_dentry),
-		  sdcardfs_lower_inode(d_inode(old_dentry))->i_nlink);
-	i_size_write(d_inode(new_dentry), file_size_save);
-out:
-	unlock_dir(lower_dir_dentry);
-	sdcardfs_put_lower_path(old_dentry, &lower_old_path);
-	sdcardfs_put_lower_path(new_dentry, &lower_new_path);
-	REVERT_CRED();
-	return err;
-}
-#endif
-
 static int sdcardfs_unlink(struct inode *dir, struct dentry *dentry)
 {
 	int err;
@@ -178,7 +142,10 @@
 	}
 
 	/* save current_cred and override it */
-	OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));
+	saved_cred = override_fsids(SDCARDFS_SB(dir->i_sb),
+						SDCARDFS_I(dir)->data);
+	if (!saved_cred)
+		return -ENOMEM;
 
 	sdcardfs_get_lower_path(dentry, &lower_path);
 	lower_dentry = lower_path.dentry;
@@ -209,43 +176,11 @@
 	unlock_dir(lower_dir_dentry);
 	dput(lower_dentry);
 	sdcardfs_put_lower_path(dentry, &lower_path);
-	REVERT_CRED(saved_cred);
+	revert_fsids(saved_cred);
 out_eacces:
 	return err;
 }
 
-#if 0
-static int sdcardfs_symlink(struct inode *dir, struct dentry *dentry,
-			  const char *symname)
-{
-	int err;
-	struct dentry *lower_dentry;
-	struct dentry *lower_parent_dentry = NULL;
-	struct path lower_path;
-
-	OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb));
-
-	sdcardfs_get_lower_path(dentry, &lower_path);
-	lower_dentry = lower_path.dentry;
-	lower_parent_dentry = lock_parent(lower_dentry);
-
-	err = vfs_symlink(d_inode(lower_parent_dentry), lower_dentry, symname);
-	if (err)
-		goto out;
-	err = sdcardfs_interpose(dentry, dir->i_sb, &lower_path);
-	if (err)
-		goto out;
-	fsstack_copy_attr_times(dir, sdcardfs_lower_inode(dir));
-	fsstack_copy_inode_size(dir, d_inode(lower_parent_dentry));
-
-out:
-	unlock_dir(lower_parent_dentry);
-	sdcardfs_put_lower_path(dentry, &lower_path);
-	REVERT_CRED();
-	return err;
-}
-#endif
-
 static int touch(char *abs_path, mode_t mode)
 {
 	struct file *filp = filp_open(abs_path, O_RDWR|O_CREAT|O_EXCL|O_NOFOLLOW, mode);
@@ -287,7 +222,10 @@
 	}
 
 	/* save current_cred and override it */
-	OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));
+	saved_cred = override_fsids(SDCARDFS_SB(dir->i_sb),
+						SDCARDFS_I(dir)->data);
+	if (!saved_cred)
+		return -ENOMEM;
 
 	/* check disk space */
 	parent_dentry = dget_parent(dentry);
@@ -316,8 +254,11 @@
 		unlock_dir(lower_parent_dentry);
 		goto out_unlock;
 	}
+	copied_fs->umask = 0;
+	task_lock(current);
 	current->fs = copied_fs;
-	current->fs->umask = 0;
+	task_unlock(current);
+
 	err = vfs_mkdir2(lower_mnt, d_inode(lower_parent_dentry), lower_dentry, mode);
 
 	if (err) {
@@ -366,23 +307,34 @@
 	if (make_nomedia_in_obb ||
 		((pd->perm == PERM_ANDROID)
 				&& (qstr_case_eq(&dentry->d_name, &q_data)))) {
-		REVERT_CRED(saved_cred);
-		OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(d_inode(dentry)));
+		revert_fsids(saved_cred);
+		saved_cred = override_fsids(sbi,
+					SDCARDFS_I(d_inode(dentry))->data);
+		if (!saved_cred) {
+			pr_err("sdcardfs: failed to set up .nomedia in %s: %d\n",
+						lower_path.dentry->d_name.name,
+						-ENOMEM);
+			goto out;
+		}
 		set_fs_pwd(current->fs, &lower_path);
 		touch_err = touch(".nomedia", 0664);
 		if (touch_err) {
 			pr_err("sdcardfs: failed to create .nomedia in %s: %d\n",
-							lower_path.dentry->d_name.name, touch_err);
+						lower_path.dentry->d_name.name,
+						touch_err);
 			goto out;
 		}
 	}
 out:
+	task_lock(current);
 	current->fs = saved_fs;
+	task_unlock(current);
+
 	free_fs_struct(copied_fs);
 out_unlock:
 	sdcardfs_put_lower_path(dentry, &lower_path);
 out_revert:
-	REVERT_CRED(saved_cred);
+	revert_fsids(saved_cred);
 out_eacces:
 	return err;
 }
@@ -402,7 +354,10 @@
 	}
 
 	/* save current_cred and override it */
-	OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));
+	saved_cred = override_fsids(SDCARDFS_SB(dir->i_sb),
+						SDCARDFS_I(dir)->data);
+	if (!saved_cred)
+		return -ENOMEM;
 
 	/* sdcardfs_get_real_lower(): in case of remove an user's obb dentry
 	 * the dentry on the original path should be deleted.
@@ -427,44 +382,11 @@
 out:
 	unlock_dir(lower_dir_dentry);
 	sdcardfs_put_real_lower(dentry, &lower_path);
-	REVERT_CRED(saved_cred);
+	revert_fsids(saved_cred);
 out_eacces:
 	return err;
 }
 
-#if 0
-static int sdcardfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
-			dev_t dev)
-{
-	int err;
-	struct dentry *lower_dentry;
-	struct dentry *lower_parent_dentry = NULL;
-	struct path lower_path;
-
-	OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb));
-
-	sdcardfs_get_lower_path(dentry, &lower_path);
-	lower_dentry = lower_path.dentry;
-	lower_parent_dentry = lock_parent(lower_dentry);
-
-	err = vfs_mknod(d_inode(lower_parent_dentry), lower_dentry, mode, dev);
-	if (err)
-		goto out;
-
-	err = sdcardfs_interpose(dentry, dir->i_sb, &lower_path);
-	if (err)
-		goto out;
-	fsstack_copy_attr_times(dir, sdcardfs_lower_inode(dir));
-	fsstack_copy_inode_size(dir, d_inode(lower_parent_dentry));
-
-out:
-	unlock_dir(lower_parent_dentry);
-	sdcardfs_put_lower_path(dentry, &lower_path);
-	REVERT_CRED();
-	return err;
-}
-#endif
-
 /*
  * The locking rules in sdcardfs_rename are complex.  We could use a simpler
  * superblock-level name-space lock for renames and copy-ups.
@@ -493,7 +415,10 @@
 	}
 
 	/* save current_cred and override it */
-	OVERRIDE_CRED(SDCARDFS_SB(old_dir->i_sb), saved_cred, SDCARDFS_I(new_dir));
+	saved_cred = override_fsids(SDCARDFS_SB(old_dir->i_sb),
+						SDCARDFS_I(new_dir)->data);
+	if (!saved_cred)
+		return -ENOMEM;
 
 	sdcardfs_get_real_lower(old_dentry, &lower_old_path);
 	sdcardfs_get_lower_path(new_dentry, &lower_new_path);
@@ -540,7 +465,7 @@
 	dput(lower_new_dir_dentry);
 	sdcardfs_put_real_lower(old_dentry, &lower_old_path);
 	sdcardfs_put_lower_path(new_dentry, &lower_new_path);
-	REVERT_CRED(saved_cred);
+	revert_fsids(saved_cred);
 out_eacces:
 	return err;
 }
@@ -659,33 +584,7 @@
 	if (IS_POSIXACL(inode))
 		pr_warn("%s: This may be undefined behavior...\n", __func__);
 	err = generic_permission(&tmp, mask);
-	/* XXX
-	 * Original sdcardfs code calls inode_permission(lower_inode,.. )
-	 * for checking inode permission. But doing such things here seems
-	 * duplicated work, because the functions called after this func,
-	 * such as vfs_create, vfs_unlink, vfs_rename, and etc,
-	 * does exactly same thing, i.e., they calls inode_permission().
-	 * So we just let they do the things.
-	 * If there are any security hole, just uncomment following if block.
-	 */
-#if 0
-	if (!err) {
-		/*
-		 * Permission check on lower_inode(=EXT4).
-		 * we check it with AID_MEDIA_RW permission
-		 */
-		struct inode *lower_inode;
-
-		OVERRIDE_CRED(SDCARDFS_SB(inode->sb));
-
-		lower_inode = sdcardfs_lower_inode(inode);
-		err = inode_permission(lower_inode, mask);
-
-		REVERT_CRED();
-	}
-#endif
 	return err;
-
 }
 
 static int sdcardfs_setattr_wrn(struct dentry *dentry, struct iattr *ia)
@@ -763,7 +662,10 @@
 		goto out_err;
 
 	/* save current_cred and override it */
-	OVERRIDE_CRED(SDCARDFS_SB(dentry->d_sb), saved_cred, SDCARDFS_I(inode));
+	saved_cred = override_fsids(SDCARDFS_SB(dentry->d_sb),
+						SDCARDFS_I(inode)->data);
+	if (!saved_cred)
+		return -ENOMEM;
 
 	sdcardfs_get_lower_path(dentry, &lower_path);
 	lower_dentry = lower_path.dentry;
@@ -822,7 +724,7 @@
 
 out:
 	sdcardfs_put_lower_path(dentry, &lower_path);
-	REVERT_CRED(saved_cred);
+	revert_fsids(saved_cred);
 out_err:
 	return err;
 }
@@ -905,13 +807,6 @@
 	.setattr	= sdcardfs_setattr_wrn,
 	.setattr2	= sdcardfs_setattr,
 	.getattr	= sdcardfs_getattr,
-	/* XXX Following operations are implemented,
-	 *     but FUSE(sdcard) or FAT does not support them
-	 *     These methods are *NOT* perfectly tested.
-	.symlink	= sdcardfs_symlink,
-	.link		= sdcardfs_link,
-	.mknod		= sdcardfs_mknod,
-	 */
 };
 
 const struct inode_operations sdcardfs_main_iops = {
diff --git a/fs/sdcardfs/lookup.c b/fs/sdcardfs/lookup.c
index 98051996..beec63b 100644
--- a/fs/sdcardfs/lookup.c
+++ b/fs/sdcardfs/lookup.c
@@ -426,7 +426,12 @@
 	}
 
 	/* save current_cred and override it */
-	OVERRIDE_CRED_PTR(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));
+	saved_cred = override_fsids(SDCARDFS_SB(dir->i_sb),
+						SDCARDFS_I(dir)->data);
+	if (!saved_cred) {
+		ret = ERR_PTR(-ENOMEM);
+		goto out_err;
+	}
 
 	sdcardfs_get_lower_path(parent, &lower_parent_path);
 
@@ -457,7 +462,7 @@
 
 out:
 	sdcardfs_put_lower_path(parent, &lower_parent_path);
-	REVERT_CRED(saved_cred);
+	revert_fsids(saved_cred);
 out_err:
 	dput(parent);
 	return ret;
diff --git a/fs/sdcardfs/sdcardfs.h b/fs/sdcardfs/sdcardfs.h
index 826afb5..ec2290a 100644
--- a/fs/sdcardfs/sdcardfs.h
+++ b/fs/sdcardfs/sdcardfs.h
@@ -88,31 +88,6 @@
 		(x)->i_mode = ((x)->i_mode & S_IFMT) | 0775;\
 	} while (0)
 
-/* OVERRIDE_CRED() and REVERT_CRED()
- *	OVERRIDE_CRED()
- *		backup original task->cred
- *		and modifies task->cred->fsuid/fsgid to specified value.
- *	REVERT_CRED()
- *		restore original task->cred->fsuid/fsgid.
- * These two macro should be used in pair, and OVERRIDE_CRED() should be
- * placed at the beginning of a function, right after variable declaration.
- */
-#define OVERRIDE_CRED(sdcardfs_sbi, saved_cred, info)		\
-	do {	\
-		saved_cred = override_fsids(sdcardfs_sbi, info->data);	\
-		if (!saved_cred)	\
-			return -ENOMEM;	\
-	} while (0)
-
-#define OVERRIDE_CRED_PTR(sdcardfs_sbi, saved_cred, info)	\
-	do {	\
-		saved_cred = override_fsids(sdcardfs_sbi, info->data);	\
-		if (!saved_cred)	\
-			return ERR_PTR(-ENOMEM);	\
-	} while (0)
-
-#define REVERT_CRED(saved_cred)	revert_fsids(saved_cred)
-
 /* Android 5.0 support */
 
 /* Permission mode for a specific node. Controls how file permissions
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 03dda1c..727a9e3 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -1918,6 +1918,9 @@
 	int dev, vol;
 	char *endptr;
 
+	if (!name || !*name)
+		return ERR_PTR(-EINVAL);
+
 	/* First, try to open using the device node path method */
 	ubi = ubi_open_volume_path(name, mode);
 	if (!IS_ERR(ubi))
diff --git a/fs/xattr.c b/fs/xattr.c
index 2c2c28e..1c91835 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -953,17 +953,19 @@
 	int err = 0;
 
 #ifdef CONFIG_FS_POSIX_ACL
-	if (inode->i_acl) {
-		err = xattr_list_one(&buffer, &remaining_size,
-				     XATTR_NAME_POSIX_ACL_ACCESS);
-		if (err)
-			return err;
-	}
-	if (inode->i_default_acl) {
-		err = xattr_list_one(&buffer, &remaining_size,
-				     XATTR_NAME_POSIX_ACL_DEFAULT);
-		if (err)
-			return err;
+	if (IS_POSIXACL(inode)) {
+		if (inode->i_acl) {
+			err = xattr_list_one(&buffer, &remaining_size,
+					     XATTR_NAME_POSIX_ACL_ACCESS);
+			if (err)
+				return err;
+		}
+		if (inode->i_default_acl) {
+			err = xattr_list_one(&buffer, &remaining_size,
+					     XATTR_NAME_POSIX_ACL_DEFAULT);
+			if (err)
+				return err;
+		}
 	}
 #endif
 
diff --git a/include/Kbuild b/include/Kbuild
index bab1145..9205b04 100644
--- a/include/Kbuild
+++ b/include/Kbuild
@@ -1,2 +1,6 @@
 # Top-level Makefile calls into asm-$(ARCH)
 # List only non-arch directories below
+
+ifneq ($(VSERVICES_SUPPORT), "")
+header-y += vservices/
+endif
diff --git a/include/asm-generic/okl4_virq.h b/include/asm-generic/okl4_virq.h
new file mode 100644
index 0000000..2eca110
--- /dev/null
+++ b/include/asm-generic/okl4_virq.h
@@ -0,0 +1,27 @@
+/*
+ * include/asm-generic/okl4_virq.h
+ *
+ * Copyright (c) 2017 General Dynamics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __OKL4_VIRQ_H__
+#define __OKL4_VIRQ_H__
+
+#include <linux/irq.h>
+#include <microvisor/microvisor.h>
+
+static inline okl4_virq_flags_t okl4_get_virq_payload(unsigned int irq)
+{
+	struct irq_data *irqd = irq_get_irq_data(irq);
+
+	if (WARN_ON_ONCE(!irqd))
+		return 0;
+
+	return _okl4_sys_interrupt_get_payload(irqd_to_hwirq(irqd)).payload;
+}
+
+#endif
diff --git a/include/dt-bindings/clock/mdss-10nm-pll-clk.h b/include/dt-bindings/clock/mdss-10nm-pll-clk.h
index 8108c98..f9781b5 100644
--- a/include/dt-bindings/clock/mdss-10nm-pll-clk.h
+++ b/include/dt-bindings/clock/mdss-10nm-pll-clk.h
@@ -1,6 +1,6 @@
 
 /*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -26,16 +26,32 @@
 #define PCLK_SRC_MUX_0_CLK	7
 #define PCLK_SRC_0_CLK		8
 #define PCLK_MUX_0_CLK		9
-#define VCO_CLK_1		10
-#define PLL_OUT_DIV_1_CLK	11
-#define BITCLK_SRC_1_CLK	12
-#define BYTECLK_SRC_1_CLK	13
-#define POST_BIT_DIV_1_CLK	14
-#define POST_VCO_DIV_1_CLK	15
-#define BYTECLK_MUX_1_CLK	16
-#define PCLK_SRC_MUX_1_CLK	17
-#define PCLK_SRC_1_CLK		18
-#define PCLK_MUX_1_CLK		19
+#define SHADOW_VCO_CLK_0		10
+#define SHADOW_PLL_OUT_DIV_0_CLK	11
+#define SHADOW_BITCLK_SRC_0_CLK		12
+#define SHADOW_BYTECLK_SRC_0_CLK	13
+#define SHADOW_POST_BIT_DIV_0_CLK	14
+#define SHADOW_POST_VCO_DIV_0_CLK	15
+#define SHADOW_PCLK_SRC_MUX_0_CLK	16
+#define SHADOW_PCLK_SRC_0_CLK		17
+#define VCO_CLK_1		18
+#define PLL_OUT_DIV_1_CLK	19
+#define BITCLK_SRC_1_CLK	20
+#define BYTECLK_SRC_1_CLK	21
+#define POST_BIT_DIV_1_CLK	22
+#define POST_VCO_DIV_1_CLK	23
+#define BYTECLK_MUX_1_CLK	24
+#define PCLK_SRC_MUX_1_CLK	25
+#define PCLK_SRC_1_CLK		26
+#define PCLK_MUX_1_CLK		27
+#define SHADOW_VCO_CLK_1		28
+#define SHADOW_PLL_OUT_DIV_1_CLK	29
+#define SHADOW_BITCLK_SRC_1_CLK		30
+#define SHADOW_BYTECLK_SRC_1_CLK	31
+#define SHADOW_POST_BIT_DIV_1_CLK	32
+#define SHADOW_POST_VCO_DIV_1_CLK	33
+#define SHADOW_PCLK_SRC_MUX_1_CLK	34
+#define SHADOW_PCLK_SRC_1_CLK		35
 
 /* DP PLL clocks */
 #define	DP_VCO_CLK	0
diff --git a/include/linux/Kbuild.vservices b/include/linux/Kbuild.vservices
new file mode 100644
index 0000000..392f559
--- /dev/null
+++ b/include/linux/Kbuild.vservices
@@ -0,0 +1,3 @@
+#
+# Virtual Services headers which need to be exported for user-space
+#
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
index 43072b1..9f721ee 100644
--- a/include/linux/arm-smccc.h
+++ b/include/linux/arm-smccc.h
@@ -201,47 +201,57 @@
 
 #define __declare_arg_0(a0, res)					\
 	struct arm_smccc_res   *___res = res;				\
-	register u32           r0 SMCCC_REG(0) = a0;			\
+	register unsigned long r0 SMCCC_REG(0) = (u32)a0;		\
 	register unsigned long r1 SMCCC_REG(1);				\
 	register unsigned long r2 SMCCC_REG(2);				\
 	register unsigned long r3 SMCCC_REG(3)
 
 #define __declare_arg_1(a0, a1, res)					\
+	typeof(a1) __a1 = a1;						\
 	struct arm_smccc_res   *___res = res;				\
-	register u32           r0 SMCCC_REG(0) = a0;			\
-	register typeof(a1)    r1 SMCCC_REG(1) = a1;			\
+	register unsigned long r0 SMCCC_REG(0) = (u32)a0;		\
+	register unsigned long r1 SMCCC_REG(1) = __a1;			\
 	register unsigned long r2 SMCCC_REG(2);				\
 	register unsigned long r3 SMCCC_REG(3)
 
 #define __declare_arg_2(a0, a1, a2, res)				\
+	typeof(a1) __a1 = a1;						\
+	typeof(a2) __a2 = a2;						\
 	struct arm_smccc_res   *___res = res;				\
-	register u32           r0 SMCCC_REG(0) = a0;			\
-	register typeof(a1)    r1 SMCCC_REG(1) = a1;			\
-	register typeof(a2)    r2 SMCCC_REG(2) = a2;			\
+	register unsigned long r0 SMCCC_REG(0) = (u32)a0;		\
+	register unsigned long r1 SMCCC_REG(1) = __a1;			\
+	register unsigned long r2 SMCCC_REG(2) = __a2;			\
 	register unsigned long r3 SMCCC_REG(3)
 
 #define __declare_arg_3(a0, a1, a2, a3, res)				\
+	typeof(a1) __a1 = a1;						\
+	typeof(a2) __a2 = a2;						\
+	typeof(a3) __a3 = a3;						\
 	struct arm_smccc_res   *___res = res;				\
-	register u32           r0 SMCCC_REG(0) = a0;			\
-	register typeof(a1)    r1 SMCCC_REG(1) = a1;			\
-	register typeof(a2)    r2 SMCCC_REG(2) = a2;			\
-	register typeof(a3)    r3 SMCCC_REG(3) = a3
+	register unsigned long r0 SMCCC_REG(0) = (u32)a0;		\
+	register unsigned long r1 SMCCC_REG(1) = __a1;			\
+	register unsigned long r2 SMCCC_REG(2) = __a2;			\
+	register unsigned long r3 SMCCC_REG(3) = __a3
 
 #define __declare_arg_4(a0, a1, a2, a3, a4, res)			\
+	typeof(a4) __a4 = a4;						\
 	__declare_arg_3(a0, a1, a2, a3, res);				\
-	register typeof(a4) r4 SMCCC_REG(4) = a4
+	register unsigned long r4 SMCCC_REG(4) = __a4
 
 #define __declare_arg_5(a0, a1, a2, a3, a4, a5, res)			\
+	typeof(a5) __a5 = a5;						\
 	__declare_arg_4(a0, a1, a2, a3, a4, res);			\
-	register typeof(a5) r5 SMCCC_REG(5) = a5
+	register unsigned long r5 SMCCC_REG(5) = __a5
 
 #define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res)		\
+	typeof(a6) __a6 = a6;						\
 	__declare_arg_5(a0, a1, a2, a3, a4, a5, res);			\
-	register typeof(a6) r6 SMCCC_REG(6) = a6
+	register unsigned long r6 SMCCC_REG(6) = __a6
 
 #define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res)		\
+	typeof(a7) __a7 = a7;						\
 	__declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res);		\
-	register typeof(a7) r7 SMCCC_REG(7) = a7
+	register unsigned long r7 SMCCC_REG(7) = __a7
 
 #define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__)
 #define __declare_args(count, ...)  ___declare_args(count, __VA_ARGS__)
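
For context, the declared-register helpers above ultimately feed the existing arm_smccc_smc()/arm_smccc_hvc() entry points; a minimal caller sketch, using the value 0x84000000 purely as an example function ID, looks like:

static unsigned long example_smccc_call(void)
{
	struct arm_smccc_res res;

	/* a0 carries the 32-bit function ID; the remaining arguments
	 * are passed as full register-width values, as declared above. */
	arm_smccc_smc(0x84000000, 0, 0, 0, 0, 0, 0, 0, &res);

	return res.a0;
}
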
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 0fbce32..48bc2b7 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -425,6 +425,7 @@
 #define CPUFREQ_CREATE_POLICY		(3)
 #define CPUFREQ_REMOVE_POLICY		(4)
 #define CPUFREQ_STOP			(5)
+#define CPUFREQ_INCOMPATIBLE		(6)
 
 /* Govinfo Notifiers */
 #define CPUFREQ_LOAD_CHANGE		(0)
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index aa5db8b..f70f8ac 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -304,11 +304,6 @@
  * For NAT entries
  */
 #define NAT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_nat_entry))
-#define NAT_ENTRY_BITMAP_SIZE	((NAT_ENTRY_PER_BLOCK + 7) / 8)
-#define NAT_ENTRY_BITMAP_SIZE_ALIGNED				\
-	((NAT_ENTRY_BITMAP_SIZE + BITS_PER_LONG - 1) /		\
-	BITS_PER_LONG * BITS_PER_LONG)
-
 
 struct f2fs_nat_entry {
 	__u8 version;		/* latest version of cached nat entry */
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index c2a0f00..734377a 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -292,6 +292,8 @@
 	return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
 }
 
+extern u64 jiffies64_to_nsecs(u64 j);
+
 extern unsigned long __msecs_to_jiffies(const unsigned int m);
 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
 /*
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index e1b845a..153ed93 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2335,6 +2335,13 @@
 	struct net_device *dev;
 };
 
+struct netdev_notifier_info_ext {
+	struct netdev_notifier_info info; /* must be first */
+	union {
+		u32 mtu;
+	} ext;
+};
+
 struct netdev_notifier_change_info {
 	struct netdev_notifier_info info; /* must be first */
 	unsigned int flags_changed;
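
As an aside, a notifier that knows a given event passes the extended structure can recover it through the embedded info member; a sketch under that assumption (the event checked and the handler name are illustrative, not defined by this patch):

static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct netdev_notifier_info *info = ptr;
	struct netdev_notifier_info_ext *ext;

	if (event != NETDEV_CHANGEMTU)
		return NOTIFY_DONE;

	/* info is the first member, so container_of() is effectively a cast. */
	ext = container_of(info, struct netdev_notifier_info_ext, info);
	pr_debug("new mtu: %u\n", ext->ext.mtu);

	return NOTIFY_DONE;
}
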
diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
index 984b211..ea8a977 100644
--- a/include/linux/netfilter_bridge/ebtables.h
+++ b/include/linux/netfilter_bridge/ebtables.h
@@ -123,4 +123,9 @@
 /* True if the target is not a standard target */
 #define INVALID_TARGET (info->target < -NUM_STANDARD_TARGETS || info->target >= 0)
 
+static inline bool ebt_invalid_target(int target)
+{
+	return (target < -NUM_STANDARD_TARGETS || target >= 0);
+}
+
 #endif
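
For illustration, a target extension's checkentry() can use the new helper in place of the INVALID_TARGET macro; a sketch assuming the ebt_redirect target's info layout:

static int example_tg_check(const struct xt_tgchk_param *par)
{
	const struct ebt_redirect_info *info = par->targinfo;

	/* Reject verdicts outside the standard-target range. */
	if (ebt_invalid_target(info->target))
		return -EINVAL;

	return 0;
}
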
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 47c5b39..d32e7b8 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -266,7 +266,7 @@
 	int				capabilities;
 
 	int * __percpu			pmu_disable_count;
-	struct perf_cpu_context * __percpu pmu_cpu_context;
+	struct perf_cpu_context __percpu *pmu_cpu_context;
 	atomic_t			exclusive_cnt; /* < 0: cpu; > 0: tsk */
 	int				task_ctx_nr;
 	int				hrtimer_interval_ms;
diff --git a/include/linux/platform_data/ina2xx.h b/include/linux/platform_data/ina2xx.h
index 9abc0ca..9f0aa1b 100644
--- a/include/linux/platform_data/ina2xx.h
+++ b/include/linux/platform_data/ina2xx.h
@@ -1,7 +1,7 @@
 /*
  * Driver for Texas Instruments INA219, INA226 power monitor chips
  *
- * Copyright (C) 2012 Lothar Felten <l-felten@ti.com>
+ * Copyright (C) 2012 Lothar Felten <lothar.felten@gmail.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 4c07788..731dcef 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -443,6 +443,7 @@
 	spinlock_t changed_lock;
 	bool changed;
 	bool initialized;
+	bool removing;
 	atomic_t use_cnt;
 #ifdef CONFIG_THERMAL
 	struct thermal_zone_device *tzd;
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 85d1ffc..4421e5c 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -138,7 +138,6 @@
 /**
  * struct rhashtable - Hash table handle
  * @tbl: Bucket table
- * @nelems: Number of elements in table
  * @key_len: Key length for hashfn
  * @elasticity: Maximum chain length before rehash
  * @p: Configuration parameters
@@ -146,10 +145,10 @@
  * @run_work: Deferred worker to expand/shrink asynchronously
  * @mutex: Mutex to protect current/future table swapping
  * @lock: Spin lock to protect walker list
+ * @nelems: Number of elements in table
  */
 struct rhashtable {
 	struct bucket_table __rcu	*tbl;
-	atomic_t			nelems;
 	unsigned int			key_len;
 	unsigned int			elasticity;
 	struct rhashtable_params	p;
@@ -157,6 +156,7 @@
 	struct work_struct		run_work;
 	struct mutex                    mutex;
 	spinlock_t			lock;
+	atomic_t			nelems;
 };
 
 /**
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 9b8e0ed..11bb7cb 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -643,9 +643,14 @@
 				struct skb_mstamp skb_mstamp;
 			};
 		};
-		struct rb_node	rbnode; /* used in netem & tcp stack */
+		struct rb_node		rbnode; /* used in netem, ip4 defrag, and tcp stack */
 	};
-	struct sock		*sk;
+
+	union {
+		struct sock		*sk;
+		int			ip_defrag_offset;
+	};
+
 	struct net_device	*dev;
 
 	/*
@@ -2417,7 +2422,7 @@
 		kfree_skb(skb);
 }
 
-void skb_rbtree_purge(struct rb_root *root);
+unsigned int skb_rbtree_purge(struct rb_root *root);
 
 void *netdev_alloc_frag(unsigned int fragsz);
 
@@ -2953,6 +2958,7 @@
 	return skb->data;
 }
 
+int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);
 /**
  *	pskb_trim_rcsum - trim received skb and update checksum
  *	@skb: buffer to trim
@@ -2966,9 +2972,7 @@
 {
 	if (likely(len >= skb->len))
 		return 0;
-	if (skb->ip_summed == CHECKSUM_COMPLETE)
-		skb->ip_summed = CHECKSUM_NONE;
-	return __pskb_trim(skb, len);
+	return pskb_trim_rcsum_slow(skb, len);
 }
 
 static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len)
@@ -2988,6 +2992,12 @@
 
 #define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
 
+#define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
+#define skb_rb_first(root) rb_to_skb(rb_first(root))
+#define skb_rb_last(root)  rb_to_skb(rb_last(root))
+#define skb_rb_next(skb)   rb_to_skb(rb_next(&(skb)->rbnode))
+#define skb_rb_prev(skb)   rb_to_skb(rb_prev(&(skb)->rbnode))
+
 #define skb_queue_walk(queue, skb) \
 		for (skb = (queue)->next;					\
 		     skb != (struct sk_buff *)(queue);				\
@@ -3002,6 +3012,18 @@
 		for (; skb != (struct sk_buff *)(queue);			\
 		     skb = skb->next)
 
+#define skb_rbtree_walk(skb, root)						\
+		for (skb = skb_rb_first(root); skb != NULL;			\
+		     skb = skb_rb_next(skb))
+
+#define skb_rbtree_walk_from(skb)						\
+		for (; skb != NULL;						\
+		     skb = skb_rb_next(skb))
+
+#define skb_rbtree_walk_from_safe(skb, tmp)					\
+		for (; tmp = skb ? skb_rb_next(skb) : NULL, (skb != NULL);	\
+		     skb = tmp)
+
 #define skb_queue_walk_from_safe(queue, skb, tmp)				\
 		for (tmp = skb->next;						\
 		     skb != (struct sk_buff *)(queue);				\
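
For reference, the new walkers can be combined to drain a tree of queued skbs; an illustrative sketch (function name is hypothetical):

static unsigned int example_rbtree_drop_all(struct rb_root *root)
{
	struct sk_buff *skb = skb_rb_first(root), *tmp;
	unsigned int freed = 0;

	/* The _safe variant caches the next node before the body runs,
	 * so the current skb may be unlinked and freed inside the loop. */
	skb_rbtree_walk_from_safe(skb, tmp) {
		rb_erase(&skb->rbnode, root);
		freed += skb->truesize;
		kfree_skb(skb);
	}

	return freed;
}
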
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 75f56c2..b6a59e8 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -67,7 +67,8 @@
 	int size;		/* The size of an object including meta data */
 	int object_size;	/* The size of an object without meta data */
 	int offset;		/* Free pointer offset. */
-	int cpu_partial;	/* Number of per cpu partial objects to keep around */
+	/* Number of per cpu partial objects to keep around */
+	unsigned int cpu_partial;
 	struct kmem_cache_order_objects oo;
 
 	/* Allocation and freeing of slabs */
diff --git a/include/linux/tty.h b/include/linux/tty.h
index fe1b862..bcfeb9e 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -313,6 +313,10 @@
 	wait_queue_head_t write_wait;
 	wait_queue_head_t read_wait;
 	struct work_struct hangup_work;
+#if defined(CONFIG_TTY_FLUSH_LOCAL_ECHO)
+	int delayed_work;
+	struct delayed_work echo_delayed_work;
+#endif
 	void *disc_data;
 	void *driver_data;
 	spinlock_t files_lock;		/* protects tty_files list */
diff --git a/include/media/v4l2-fh.h b/include/media/v4l2-fh.h
index e19e624..d267160 100644
--- a/include/media/v4l2-fh.h
+++ b/include/media/v4l2-fh.h
@@ -42,10 +42,13 @@
  * @prio: priority of the file handler, as defined by &enum v4l2_priority
  *
  * @wait: event' s wait queue
+ * @subscribe_lock: serialises changes to the subscribed list; guarantees that
+ *		    the add and del event callbacks are called in order
  * @subscribed: list of subscribed events
  * @available: list of events waiting to be dequeued
  * @navailable: number of available events at @available list
  * @sequence: event sequence number
+ *
  * @m2m_ctx: pointer to &struct v4l2_m2m_ctx
  */
 struct v4l2_fh {
@@ -56,6 +59,7 @@
 
 	/* Events */
 	wait_queue_head_t	wait;
+	struct mutex		subscribe_lock;
 	struct list_head	subscribed;
 	struct list_head	available;
 	unsigned int		navailable;
diff --git a/include/microvisor/kernel/microvisor.h b/include/microvisor/kernel/microvisor.h
new file mode 100644
index 0000000..1a30d1f
--- /dev/null
+++ b/include/microvisor/kernel/microvisor.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* Auto generated - do not modify */
+
+/** @addtogroup lib_microvisor
+ * @{
+ */
+
+
+#ifndef __AUTO__MICROVISOR_H__
+#define __AUTO__MICROVISOR_H__
+
+/** SDK Major number */
+#define OKL4_SDK_VERSION_MAJOR 5
+/** SDK Minor number */
+#define OKL4_SDK_VERSION_MINOR 3
+/**
+ * If defined, indicates this is an internal development version.
+ * In this case, OKL4_SDK_VERSION_RELEASE == -1
+ */
+#define OKL4_SDK_VERSION_DEVELOPMENT 1
+/** SDK Release (revision) number */
+#define OKL4_SDK_VERSION_RELEASE (-1)
+/** SDK Maintenance number. Indicates the maintenance sequence revision. */
+#define OKL4_SDK_VERSION_MAINTENANCE 0
+
+
+/** @addtogroup lib_microvisor_helpers Microvisor Helpers
+ * @{
+ */
+
+/** Common C and ASM defines. */
+
+/** OKL4 Kernel supports a Virtual CPU (vCPU) interface. */
+#define OKL4_VCPU_SUPPORT
+
+
+/** OKL4 Kernel vCPU API supports SMP guest cells. */
+#define OKL4_VCPU_SMP_SUPPORT
+
+
+/** @} */
+#endif /* __AUTO__MICROVISOR_H__ */
+/** @} */
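
As an example, guest code can gate vCPU-specific paths on the generated feature and version macros (the EXAMPLE_* name below is ours, not part of the SDK):

#if defined(OKL4_VCPU_SUPPORT) && (OKL4_SDK_VERSION_MAJOR >= 5)
#define EXAMPLE_HAVE_OKL4_VCPU	1
#else
#define EXAMPLE_HAVE_OKL4_VCPU	0
#endif
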
diff --git a/include/microvisor/kernel/offsets.h b/include/microvisor/kernel/offsets.h
new file mode 100644
index 0000000..9517acf
--- /dev/null
+++ b/include/microvisor/kernel/offsets.h
@@ -0,0 +1,1534 @@
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* Auto generated - do not modify */
+
+#ifndef __AUTO__MICROVISOR_OFFSETS_H__
+#define __AUTO__MICROVISOR_OFFSETS_H__
+
+#if defined(ASSEMBLY)
+/* LWEE structure's type offsets */
+
+/**
+ *   Offsets for struct okl4_atomic_register
+ **/
+/*lint -esym(621, OKL4_STRUCT_ATOMIC_REGISTER_SIZE) */
+#define OKL4_STRUCT_ATOMIC_REGISTER_SIZE (8)
+/*lint -esym(621, OKL4_OFS_ATOMIC_REGISTER_VALUE) */
+#define OKL4_OFS_ATOMIC_REGISTER_VALUE (0)
+/**
+ *   Offsets for struct okl4_atomic_uint16
+ **/
+/*lint -esym(621, OKL4_STRUCT_ATOMIC_UINT16_SIZE) */
+#define OKL4_STRUCT_ATOMIC_UINT16_SIZE (2)
+/*lint -esym(621, OKL4_OFS_ATOMIC_UINT16_VALUE) */
+#define OKL4_OFS_ATOMIC_UINT16_VALUE (0)
+/**
+ *   Offsets for struct okl4_atomic_uint32
+ **/
+/*lint -esym(621, OKL4_STRUCT_ATOMIC_UINT32_SIZE) */
+#define OKL4_STRUCT_ATOMIC_UINT32_SIZE (4)
+/*lint -esym(621, OKL4_OFS_ATOMIC_UINT32_VALUE) */
+#define OKL4_OFS_ATOMIC_UINT32_VALUE (0)
+/**
+ *   Offsets for struct okl4_atomic_uint64
+ **/
+/*lint -esym(621, OKL4_STRUCT_ATOMIC_UINT64_SIZE) */
+#define OKL4_STRUCT_ATOMIC_UINT64_SIZE (8)
+/*lint -esym(621, OKL4_OFS_ATOMIC_UINT64_VALUE) */
+#define OKL4_OFS_ATOMIC_UINT64_VALUE (0)
+/**
+ *   Offsets for struct okl4_atomic_uint8
+ **/
+/*lint -esym(621, OKL4_STRUCT_ATOMIC_UINT8_SIZE) */
+#define OKL4_STRUCT_ATOMIC_UINT8_SIZE (1)
+/*lint -esym(621, OKL4_OFS_ATOMIC_UINT8_VALUE) */
+#define OKL4_OFS_ATOMIC_UINT8_VALUE (0)
+/**
+ *   Offsets for struct okl4_axon_data
+ **/
+/*lint -esym(621, OKL4_STRUCT_AXON_DATA_SIZE) */
+#define OKL4_STRUCT_AXON_DATA_SIZE (12)
+/*lint -esym(621, OKL4_OFS_AXON_DATA_KCAP) */
+#define OKL4_OFS_AXON_DATA_KCAP (0)
+/*lint -esym(621, OKL4_OFS_AXON_DATA_SEGMENT) */
+#define OKL4_OFS_AXON_DATA_SEGMENT (4)
+/*lint -esym(621, OKL4_OFS_AXON_DATA_VIRQ) */
+#define OKL4_OFS_AXON_DATA_VIRQ (8)
+/**
+ *   Offsets for struct okl4_axon_ep_data
+ **/
+/*lint -esym(621, OKL4_STRUCT_AXON_EP_DATA_SIZE) */
+#define OKL4_STRUCT_AXON_EP_DATA_SIZE (24)
+/*lint -esym(621, OKL4_OFS_AXON_EP_DATA_RX) */
+#define OKL4_OFS_AXON_EP_DATA_RX (0)
+/*lint -esym(621, OKL4_OFS_AXON_EP_DATA_RX_KCAP) */
+#define OKL4_OFS_AXON_EP_DATA_RX_KCAP (0)
+/*lint -esym(621, OKL4_OFS_AXON_EP_DATA_RX_SEGMENT) */
+#define OKL4_OFS_AXON_EP_DATA_RX_SEGMENT (4)
+/*lint -esym(621, OKL4_OFS_AXON_EP_DATA_RX_VIRQ) */
+#define OKL4_OFS_AXON_EP_DATA_RX_VIRQ (8)
+/*lint -esym(621, OKL4_OFS_AXON_EP_DATA_TX) */
+#define OKL4_OFS_AXON_EP_DATA_TX (12)
+/*lint -esym(621, OKL4_OFS_AXON_EP_DATA_TX_KCAP) */
+#define OKL4_OFS_AXON_EP_DATA_TX_KCAP (12)
+/*lint -esym(621, OKL4_OFS_AXON_EP_DATA_TX_SEGMENT) */
+#define OKL4_OFS_AXON_EP_DATA_TX_SEGMENT (16)
+/*lint -esym(621, OKL4_OFS_AXON_EP_DATA_TX_VIRQ) */
+#define OKL4_OFS_AXON_EP_DATA_TX_VIRQ (20)
+/**
+ *   Offsets for struct okl4_axon_queue
+ **/
+/*lint -esym(621, OKL4_STRUCT_AXON_QUEUE_SIZE) */
+#define OKL4_STRUCT_AXON_QUEUE_SIZE (12)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE_QUEUE_OFFSET) */
+#define OKL4_OFS_AXON_QUEUE_QUEUE_OFFSET (0)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE_ENTRIES) */
+#define OKL4_OFS_AXON_QUEUE_ENTRIES (4)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE_KPTR) */
+#define OKL4_OFS_AXON_QUEUE_KPTR (6)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE_UPTR) */
+#define OKL4_OFS_AXON_QUEUE_UPTR (8)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE___PADDING0_2) */
+#define OKL4_OFS_AXON_QUEUE___PADDING0_2 (10)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE___PADDING1_3) */
+#define OKL4_OFS_AXON_QUEUE___PADDING1_3 (11)
+/**
+ *   Offsets for struct okl4_axon_queue_entry
+ **/
+/*lint -esym(621, OKL4_STRUCT_AXON_QUEUE_ENTRY_SIZE) */
+#define OKL4_STRUCT_AXON_QUEUE_ENTRY_SIZE (24)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE_ENTRY_INFO) */
+#define OKL4_OFS_AXON_QUEUE_ENTRY_INFO (0)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE_ENTRY_DATA_SIZE) */
+#define OKL4_OFS_AXON_QUEUE_ENTRY_DATA_SIZE (8)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE_ENTRY_RECV_SEQUENCE) */
+#define OKL4_OFS_AXON_QUEUE_ENTRY_RECV_SEQUENCE (16)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE_ENTRY___PADDING0_4) */
+#define OKL4_OFS_AXON_QUEUE_ENTRY___PADDING0_4 (20)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE_ENTRY___PADDING1_5) */
+#define OKL4_OFS_AXON_QUEUE_ENTRY___PADDING1_5 (21)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE_ENTRY___PADDING2_6) */
+#define OKL4_OFS_AXON_QUEUE_ENTRY___PADDING2_6 (22)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE_ENTRY___PADDING3_7) */
+#define OKL4_OFS_AXON_QUEUE_ENTRY___PADDING3_7 (23)
+/**
+ *   Offsets for struct okl4_axon_rx
+ **/
+/*lint -esym(621, OKL4_STRUCT_AXON_RX_SIZE) */
+#define OKL4_STRUCT_AXON_RX_SIZE (56)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES) */
+#define OKL4_OFS_AXON_RX_QUEUES (0)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_0) */
+#define OKL4_OFS_AXON_RX_QUEUES_0 (0)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_0_QUEUE_OFFSET) */
+#define OKL4_OFS_AXON_RX_QUEUES_0_QUEUE_OFFSET (0)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_0_ENTRIES) */
+#define OKL4_OFS_AXON_RX_QUEUES_0_ENTRIES (4)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_0_KPTR) */
+#define OKL4_OFS_AXON_RX_QUEUES_0_KPTR (6)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_0_UPTR) */
+#define OKL4_OFS_AXON_RX_QUEUES_0_UPTR (8)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_0___PADDING0_2) */
+#define OKL4_OFS_AXON_RX_QUEUES_0___PADDING0_2 (10)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_0___PADDING1_3) */
+#define OKL4_OFS_AXON_RX_QUEUES_0___PADDING1_3 (11)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_1) */
+#define OKL4_OFS_AXON_RX_QUEUES_1 (12)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_1_QUEUE_OFFSET) */
+#define OKL4_OFS_AXON_RX_QUEUES_1_QUEUE_OFFSET (12)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_1_ENTRIES) */
+#define OKL4_OFS_AXON_RX_QUEUES_1_ENTRIES (16)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_1_KPTR) */
+#define OKL4_OFS_AXON_RX_QUEUES_1_KPTR (18)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_1_UPTR) */
+#define OKL4_OFS_AXON_RX_QUEUES_1_UPTR (20)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_1___PADDING0_2) */
+#define OKL4_OFS_AXON_RX_QUEUES_1___PADDING0_2 (22)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_1___PADDING1_3) */
+#define OKL4_OFS_AXON_RX_QUEUES_1___PADDING1_3 (23)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_2) */
+#define OKL4_OFS_AXON_RX_QUEUES_2 (24)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_2_QUEUE_OFFSET) */
+#define OKL4_OFS_AXON_RX_QUEUES_2_QUEUE_OFFSET (24)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_2_ENTRIES) */
+#define OKL4_OFS_AXON_RX_QUEUES_2_ENTRIES (28)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_2_KPTR) */
+#define OKL4_OFS_AXON_RX_QUEUES_2_KPTR (30)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_2_UPTR) */
+#define OKL4_OFS_AXON_RX_QUEUES_2_UPTR (32)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_2___PADDING0_2) */
+#define OKL4_OFS_AXON_RX_QUEUES_2___PADDING0_2 (34)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_2___PADDING1_3) */
+#define OKL4_OFS_AXON_RX_QUEUES_2___PADDING1_3 (35)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_3) */
+#define OKL4_OFS_AXON_RX_QUEUES_3 (36)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_3_QUEUE_OFFSET) */
+#define OKL4_OFS_AXON_RX_QUEUES_3_QUEUE_OFFSET (36)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_3_ENTRIES) */
+#define OKL4_OFS_AXON_RX_QUEUES_3_ENTRIES (40)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_3_KPTR) */
+#define OKL4_OFS_AXON_RX_QUEUES_3_KPTR (42)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_3_UPTR) */
+#define OKL4_OFS_AXON_RX_QUEUES_3_UPTR (44)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_3___PADDING0_2) */
+#define OKL4_OFS_AXON_RX_QUEUES_3___PADDING0_2 (46)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_3___PADDING1_3) */
+#define OKL4_OFS_AXON_RX_QUEUES_3___PADDING1_3 (47)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUE_SIZES) */
+#define OKL4_OFS_AXON_RX_QUEUE_SIZES (48)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUE_SIZES_0) */
+#define OKL4_OFS_AXON_RX_QUEUE_SIZES_0 (48)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUE_SIZES_1) */
+#define OKL4_OFS_AXON_RX_QUEUE_SIZES_1 (50)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUE_SIZES_2) */
+#define OKL4_OFS_AXON_RX_QUEUE_SIZES_2 (52)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUE_SIZES_3) */
+#define OKL4_OFS_AXON_RX_QUEUE_SIZES_3 (54)
+/**
+ *   Offsets for struct okl4_axon_tx
+ **/
+/*lint -esym(621, OKL4_STRUCT_AXON_TX_SIZE) */
+#define OKL4_STRUCT_AXON_TX_SIZE (48)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES) */
+#define OKL4_OFS_AXON_TX_QUEUES (0)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_0) */
+#define OKL4_OFS_AXON_TX_QUEUES_0 (0)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_0_QUEUE_OFFSET) */
+#define OKL4_OFS_AXON_TX_QUEUES_0_QUEUE_OFFSET (0)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_0_ENTRIES) */
+#define OKL4_OFS_AXON_TX_QUEUES_0_ENTRIES (4)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_0_KPTR) */
+#define OKL4_OFS_AXON_TX_QUEUES_0_KPTR (6)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_0_UPTR) */
+#define OKL4_OFS_AXON_TX_QUEUES_0_UPTR (8)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_0___PADDING0_2) */
+#define OKL4_OFS_AXON_TX_QUEUES_0___PADDING0_2 (10)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_0___PADDING1_3) */
+#define OKL4_OFS_AXON_TX_QUEUES_0___PADDING1_3 (11)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_1) */
+#define OKL4_OFS_AXON_TX_QUEUES_1 (12)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_1_QUEUE_OFFSET) */
+#define OKL4_OFS_AXON_TX_QUEUES_1_QUEUE_OFFSET (12)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_1_ENTRIES) */
+#define OKL4_OFS_AXON_TX_QUEUES_1_ENTRIES (16)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_1_KPTR) */
+#define OKL4_OFS_AXON_TX_QUEUES_1_KPTR (18)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_1_UPTR) */
+#define OKL4_OFS_AXON_TX_QUEUES_1_UPTR (20)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_1___PADDING0_2) */
+#define OKL4_OFS_AXON_TX_QUEUES_1___PADDING0_2 (22)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_1___PADDING1_3) */
+#define OKL4_OFS_AXON_TX_QUEUES_1___PADDING1_3 (23)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_2) */
+#define OKL4_OFS_AXON_TX_QUEUES_2 (24)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_2_QUEUE_OFFSET) */
+#define OKL4_OFS_AXON_TX_QUEUES_2_QUEUE_OFFSET (24)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_2_ENTRIES) */
+#define OKL4_OFS_AXON_TX_QUEUES_2_ENTRIES (28)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_2_KPTR) */
+#define OKL4_OFS_AXON_TX_QUEUES_2_KPTR (30)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_2_UPTR) */
+#define OKL4_OFS_AXON_TX_QUEUES_2_UPTR (32)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_2___PADDING0_2) */
+#define OKL4_OFS_AXON_TX_QUEUES_2___PADDING0_2 (34)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_2___PADDING1_3) */
+#define OKL4_OFS_AXON_TX_QUEUES_2___PADDING1_3 (35)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_3) */
+#define OKL4_OFS_AXON_TX_QUEUES_3 (36)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_3_QUEUE_OFFSET) */
+#define OKL4_OFS_AXON_TX_QUEUES_3_QUEUE_OFFSET (36)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_3_ENTRIES) */
+#define OKL4_OFS_AXON_TX_QUEUES_3_ENTRIES (40)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_3_KPTR) */
+#define OKL4_OFS_AXON_TX_QUEUES_3_KPTR (42)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_3_UPTR) */
+#define OKL4_OFS_AXON_TX_QUEUES_3_UPTR (44)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_3___PADDING0_2) */
+#define OKL4_OFS_AXON_TX_QUEUES_3___PADDING0_2 (46)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_3___PADDING1_3) */
+#define OKL4_OFS_AXON_TX_QUEUES_3___PADDING1_3 (47)
+/**
+ *   Offsets for struct okl4_range_item
+ **/
+/*lint -esym(621, OKL4_STRUCT_RANGE_ITEM_SIZE) */
+#define OKL4_STRUCT_RANGE_ITEM_SIZE (16)
+/*lint -esym(621, OKL4_OFS_RANGE_ITEM_BASE) */
+#define OKL4_OFS_RANGE_ITEM_BASE (0)
+/*lint -esym(621, OKL4_OFS_RANGE_ITEM_SIZE) */
+#define OKL4_OFS_RANGE_ITEM_SIZE (8)
+/**
+ *   Offsets for struct okl4_virtmem_item
+ **/
+/*lint -esym(621, OKL4_STRUCT_VIRTMEM_ITEM_SIZE) */
+#define OKL4_STRUCT_VIRTMEM_ITEM_SIZE (16)
+/*lint -esym(621, OKL4_OFS_VIRTMEM_ITEM_RANGE) */
+#define OKL4_OFS_VIRTMEM_ITEM_RANGE (0)
+/*lint -esym(621, OKL4_OFS_VIRTMEM_ITEM_RANGE_BASE) */
+#define OKL4_OFS_VIRTMEM_ITEM_RANGE_BASE (0)
+/*lint -esym(621, OKL4_OFS_VIRTMEM_ITEM_RANGE_SIZE) */
+#define OKL4_OFS_VIRTMEM_ITEM_RANGE_SIZE (8)
+/**
+ *   Offsets for struct okl4_cell_management_item
+ **/
+/*lint -esym(621, OKL4_STRUCT_CELL_MANAGEMENT_ITEM_SIZE) */
+#define OKL4_STRUCT_CELL_MANAGEMENT_ITEM_SIZE (104)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_ENTRY) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_ENTRY (0)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_MAPPING_RANGE) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_MAPPING_RANGE (8)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_MAPPING_RANGE_RANGE) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_MAPPING_RANGE_RANGE (8)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_MAPPING_RANGE_RANGE_BASE) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_MAPPING_RANGE_RANGE_BASE (8)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_MAPPING_RANGE_RANGE_SIZE) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_MAPPING_RANGE_RANGE_SIZE (16)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_DATA) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_DATA (24)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_IMAGE) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_IMAGE (32)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_MMU) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_MMU (40)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING0_4) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING0_4 (44)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING1_5) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING1_5 (45)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING2_6) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING2_6 (46)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING3_7) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING3_7 (47)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_NAME) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_NAME (48)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_REGISTERS_CAP) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_REGISTERS_CAP (56)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_RESET_VIRQ) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_RESET_VIRQ (60)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_SEGMENT_INDEX) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_SEGMENT_INDEX (64)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING4_4) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING4_4 (68)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING5_5) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING5_5 (69)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING6_6) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING6_6 (70)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING7_7) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING7_7 (71)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_SEGMENTS) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_SEGMENTS (72)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_VCPUS) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_VCPUS (80)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_BOOT_ONCE) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_BOOT_ONCE (88)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_CAN_STOP) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_CAN_STOP (89)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_DEFERRED) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_DEFERRED (90)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_DETACHED) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_DETACHED (91)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_ERASE) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_ERASE (92)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING8_5) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING8_5 (93)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING9_6) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING9_6 (94)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING10_7) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING10_7 (95)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_DTB_ADDRESS) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_DTB_ADDRESS (96)
+/**
+ *   Offsets for struct okl4_cell_management
+ **/
+/*lint -esym(621, OKL4_STRUCT_CELL_MANAGEMENT_SIZE) */
+#define OKL4_STRUCT_CELL_MANAGEMENT_SIZE (8)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_NUM_ITEMS) */
+#define OKL4_OFS_CELL_MANAGEMENT_NUM_ITEMS (0)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT___PADDING0_4) */
+#define OKL4_OFS_CELL_MANAGEMENT___PADDING0_4 (4)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT___PADDING1_5) */
+#define OKL4_OFS_CELL_MANAGEMENT___PADDING1_5 (5)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT___PADDING2_6) */
+#define OKL4_OFS_CELL_MANAGEMENT___PADDING2_6 (6)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT___PADDING3_7) */
+#define OKL4_OFS_CELL_MANAGEMENT___PADDING3_7 (7)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEMS) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEMS (8)
+/**
+ *   Offsets for struct okl4_segment_mapping
+ **/
+/*lint -esym(621, OKL4_STRUCT_SEGMENT_MAPPING_SIZE) */
+#define OKL4_STRUCT_SEGMENT_MAPPING_SIZE (32)
+/*lint -esym(621, OKL4_OFS_SEGMENT_MAPPING_PHYS_ADDR) */
+#define OKL4_OFS_SEGMENT_MAPPING_PHYS_ADDR (0)
+/*lint -esym(621, OKL4_OFS_SEGMENT_MAPPING_SIZE) */
+#define OKL4_OFS_SEGMENT_MAPPING_SIZE (8)
+/*lint -esym(621, OKL4_OFS_SEGMENT_MAPPING_VIRT_ADDR) */
+#define OKL4_OFS_SEGMENT_MAPPING_VIRT_ADDR (16)
+/*lint -esym(621, OKL4_OFS_SEGMENT_MAPPING_CAP) */
+#define OKL4_OFS_SEGMENT_MAPPING_CAP (24)
+/*lint -esym(621, OKL4_OFS_SEGMENT_MAPPING_DEVICE) */
+#define OKL4_OFS_SEGMENT_MAPPING_DEVICE (28)
+/*lint -esym(621, OKL4_OFS_SEGMENT_MAPPING_OWNED) */
+#define OKL4_OFS_SEGMENT_MAPPING_OWNED (29)
+/*lint -esym(621, OKL4_OFS_SEGMENT_MAPPING___PADDING0_6) */
+#define OKL4_OFS_SEGMENT_MAPPING___PADDING0_6 (30)
+/*lint -esym(621, OKL4_OFS_SEGMENT_MAPPING___PADDING1_7) */
+#define OKL4_OFS_SEGMENT_MAPPING___PADDING1_7 (31)
+/**
+ *   Offsets for struct okl4_cell_management_segments
+ **/
+/*lint -esym(621, OKL4_STRUCT_CELL_MANAGEMENT_SEGMENTS_SIZE) */
+#define OKL4_STRUCT_CELL_MANAGEMENT_SEGMENTS_SIZE (8)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_SEGMENTS_FREE_SEGMENTS) */
+#define OKL4_OFS_CELL_MANAGEMENT_SEGMENTS_FREE_SEGMENTS (0)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_SEGMENTS_NUM_SEGMENTS) */
+#define OKL4_OFS_CELL_MANAGEMENT_SEGMENTS_NUM_SEGMENTS (4)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_SEGMENTS_SEGMENT_MAPPINGS) */
+#define OKL4_OFS_CELL_MANAGEMENT_SEGMENTS_SEGMENT_MAPPINGS (8)
+/**
+ *   Offsets for struct okl4_cell_management_vcpus
+ **/
+/*lint -esym(621, OKL4_STRUCT_CELL_MANAGEMENT_VCPUS_SIZE) */
+#define OKL4_STRUCT_CELL_MANAGEMENT_VCPUS_SIZE (4)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_VCPUS_NUM_VCPUS) */
+#define OKL4_OFS_CELL_MANAGEMENT_VCPUS_NUM_VCPUS (0)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_VCPUS_VCPU_CAPS) */
+#define OKL4_OFS_CELL_MANAGEMENT_VCPUS_VCPU_CAPS (4)
+/**
+ *   Offsets for struct _okl4_env_hdr
+ **/
+/*lint -esym(621, _OKL4_STRUCT_ENV_HDR_SIZE) */
+#define _OKL4_STRUCT_ENV_HDR_SIZE (4)
+/*lint -esym(621, _OKL4_OFS_ENV_HDR_MAGIC) */
+#define _OKL4_OFS_ENV_HDR_MAGIC (0)
+/*lint -esym(621, _OKL4_OFS_ENV_HDR_COUNT) */
+#define _OKL4_OFS_ENV_HDR_COUNT (2)
+/**
+ *   Offsets for struct _okl4_env_item
+ **/
+/*lint -esym(621, _OKL4_STRUCT_ENV_ITEM_SIZE) */
+#define _OKL4_STRUCT_ENV_ITEM_SIZE (16)
+/*lint -esym(621, _OKL4_OFS_ENV_ITEM_NAME) */
+#define _OKL4_OFS_ENV_ITEM_NAME (0)
+/*lint -esym(621, _OKL4_OFS_ENV_ITEM_ITEM) */
+#define _OKL4_OFS_ENV_ITEM_ITEM (8)
+/**
+ *   Offsets for struct _okl4_env
+ **/
+/*lint -esym(621, _OKL4_STRUCT_ENV_SIZE) */
+#define _OKL4_STRUCT_ENV_SIZE (8)
+/*lint -esym(621, _OKL4_OFS_ENV_ENV_HDR) */
+#define _OKL4_OFS_ENV_ENV_HDR (0)
+/*lint -esym(621, _OKL4_OFS_ENV_ENV_HDR_MAGIC) */
+#define _OKL4_OFS_ENV_ENV_HDR_MAGIC (0)
+/*lint -esym(621, _OKL4_OFS_ENV_ENV_HDR_COUNT) */
+#define _OKL4_OFS_ENV_ENV_HDR_COUNT (2)
+/*lint -esym(621, _OKL4_OFS_ENV___PADDING0_4) */
+#define _OKL4_OFS_ENV___PADDING0_4 (4)
+/*lint -esym(621, _OKL4_OFS_ENV___PADDING1_5) */
+#define _OKL4_OFS_ENV___PADDING1_5 (5)
+/*lint -esym(621, _OKL4_OFS_ENV___PADDING2_6) */
+#define _OKL4_OFS_ENV___PADDING2_6 (6)
+/*lint -esym(621, _OKL4_OFS_ENV___PADDING3_7) */
+#define _OKL4_OFS_ENV___PADDING3_7 (7)
+/*lint -esym(621, _OKL4_OFS_ENV_ENV_ITEM) */
+#define _OKL4_OFS_ENV_ENV_ITEM (8)
+/**
+ *   Offsets for struct okl4_env_access_cell
+ **/
+/*lint -esym(621, OKL4_STRUCT_ENV_ACCESS_CELL_SIZE) */
+#define OKL4_STRUCT_ENV_ACCESS_CELL_SIZE (16)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_CELL_NAME) */
+#define OKL4_OFS_ENV_ACCESS_CELL_NAME (0)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_CELL_NUM_ENTRIES) */
+#define OKL4_OFS_ENV_ACCESS_CELL_NUM_ENTRIES (8)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_CELL_START_ENTRY) */
+#define OKL4_OFS_ENV_ACCESS_CELL_START_ENTRY (12)
+/**
+ *   Offsets for struct okl4_env_access_entry
+ **/
+/*lint -esym(621, OKL4_STRUCT_ENV_ACCESS_ENTRY_SIZE) */
+#define OKL4_STRUCT_ENV_ACCESS_ENTRY_SIZE (48)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_ENTRY_VIRTUAL_ADDRESS) */
+#define OKL4_OFS_ENV_ACCESS_ENTRY_VIRTUAL_ADDRESS (0)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_ENTRY_OFFSET) */
+#define OKL4_OFS_ENV_ACCESS_ENTRY_OFFSET (8)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_ENTRY_SIZE) */
+#define OKL4_OFS_ENV_ACCESS_ENTRY_SIZE (16)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_ENTRY_NUM_SEGS) */
+#define OKL4_OFS_ENV_ACCESS_ENTRY_NUM_SEGS (24)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_ENTRY_SEGMENT_INDEX) */
+#define OKL4_OFS_ENV_ACCESS_ENTRY_SEGMENT_INDEX (28)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_ENTRY_CACHE_ATTRS) */
+#define OKL4_OFS_ENV_ACCESS_ENTRY_CACHE_ATTRS (32)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_ENTRY_PERMISSIONS) */
+#define OKL4_OFS_ENV_ACCESS_ENTRY_PERMISSIONS (36)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_ENTRY_OBJECT_NAME) */
+#define OKL4_OFS_ENV_ACCESS_ENTRY_OBJECT_NAME (40)
+/**
+ *   Offsets for struct okl4_env_access_table
+ **/
+/*lint -esym(621, OKL4_STRUCT_ENV_ACCESS_TABLE_SIZE) */
+#define OKL4_STRUCT_ENV_ACCESS_TABLE_SIZE (24)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_TABLE_NUM_CELLS) */
+#define OKL4_OFS_ENV_ACCESS_TABLE_NUM_CELLS (0)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_TABLE___PADDING0_4) */
+#define OKL4_OFS_ENV_ACCESS_TABLE___PADDING0_4 (4)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_TABLE___PADDING1_5) */
+#define OKL4_OFS_ENV_ACCESS_TABLE___PADDING1_5 (5)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_TABLE___PADDING2_6) */
+#define OKL4_OFS_ENV_ACCESS_TABLE___PADDING2_6 (6)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_TABLE___PADDING3_7) */
+#define OKL4_OFS_ENV_ACCESS_TABLE___PADDING3_7 (7)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_TABLE_CELLS) */
+#define OKL4_OFS_ENV_ACCESS_TABLE_CELLS (8)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_TABLE_ENTRIES) */
+#define OKL4_OFS_ENV_ACCESS_TABLE_ENTRIES (16)
+/**
+ *   Offsets for struct okl4_env_args
+ **/
+/*lint -esym(621, OKL4_STRUCT_ENV_ARGS_SIZE) */
+#define OKL4_STRUCT_ENV_ARGS_SIZE (8)
+/*lint -esym(621, OKL4_OFS_ENV_ARGS_ARGC) */
+#define OKL4_OFS_ENV_ARGS_ARGC (0)
+/*lint -esym(621, OKL4_OFS_ENV_ARGS___PADDING0_4) */
+#define OKL4_OFS_ENV_ARGS___PADDING0_4 (4)
+/*lint -esym(621, OKL4_OFS_ENV_ARGS___PADDING1_5) */
+#define OKL4_OFS_ENV_ARGS___PADDING1_5 (5)
+/*lint -esym(621, OKL4_OFS_ENV_ARGS___PADDING2_6) */
+#define OKL4_OFS_ENV_ARGS___PADDING2_6 (6)
+/*lint -esym(621, OKL4_OFS_ENV_ARGS___PADDING3_7) */
+#define OKL4_OFS_ENV_ARGS___PADDING3_7 (7)
+/*lint -esym(621, OKL4_OFS_ENV_ARGS_ARGV) */
+#define OKL4_OFS_ENV_ARGS_ARGV (8)
+/**
+ *   Offsets for struct okl4_env_interrupt_device_map
+ **/
+/*lint -esym(621, OKL4_STRUCT_ENV_INTERRUPT_DEVICE_MAP_SIZE) */
+#define OKL4_STRUCT_ENV_INTERRUPT_DEVICE_MAP_SIZE (4)
+/*lint -esym(621, OKL4_OFS_ENV_INTERRUPT_DEVICE_MAP_NUM_ENTRIES) */
+#define OKL4_OFS_ENV_INTERRUPT_DEVICE_MAP_NUM_ENTRIES (0)
+/*lint -esym(621, OKL4_OFS_ENV_INTERRUPT_DEVICE_MAP_ENTRIES) */
+#define OKL4_OFS_ENV_INTERRUPT_DEVICE_MAP_ENTRIES (4)
+/**
+ *   Offsets for struct okl4_interrupt
+ **/
+/*lint -esym(621, OKL4_STRUCT_INTERRUPT_SIZE) */
+#define OKL4_STRUCT_INTERRUPT_SIZE (4)
+/*lint -esym(621, OKL4_OFS_INTERRUPT_KCAP) */
+#define OKL4_OFS_INTERRUPT_KCAP (0)
+/**
+ *   Offsets for struct okl4_env_interrupt_handle
+ **/
+/*lint -esym(621, OKL4_STRUCT_ENV_INTERRUPT_HANDLE_SIZE) */
+#define OKL4_STRUCT_ENV_INTERRUPT_HANDLE_SIZE (8)
+/*lint -esym(621, OKL4_OFS_ENV_INTERRUPT_HANDLE_DESCRIPTOR) */
+#define OKL4_OFS_ENV_INTERRUPT_HANDLE_DESCRIPTOR (0)
+/*lint -esym(621, OKL4_OFS_ENV_INTERRUPT_HANDLE_INTERRUPT) */
+#define OKL4_OFS_ENV_INTERRUPT_HANDLE_INTERRUPT (4)
+/*lint -esym(621, OKL4_OFS_ENV_INTERRUPT_HANDLE_INTERRUPT_KCAP) */
+#define OKL4_OFS_ENV_INTERRUPT_HANDLE_INTERRUPT_KCAP (4)
+/**
+ *   Offsets for struct okl4_env_interrupt_list
+ **/
+/*lint -esym(621, OKL4_STRUCT_ENV_INTERRUPT_LIST_SIZE) */
+#define OKL4_STRUCT_ENV_INTERRUPT_LIST_SIZE (24)
+/*lint -esym(621, OKL4_OFS_ENV_INTERRUPT_LIST_NUM_ENTRIES) */
+#define OKL4_OFS_ENV_INTERRUPT_LIST_NUM_ENTRIES (0)
+/*lint -esym(621, OKL4_OFS_ENV_INTERRUPT_LIST___PADDING0_4) */
+#define OKL4_OFS_ENV_INTERRUPT_LIST___PADDING0_4 (4)
+/*lint -esym(621, OKL4_OFS_ENV_INTERRUPT_LIST___PADDING1_5) */
+#define OKL4_OFS_ENV_INTERRUPT_LIST___PADDING1_5 (5)
+/*lint -esym(621, OKL4_OFS_ENV_INTERRUPT_LIST___PADDING2_6) */
+#define OKL4_OFS_ENV_INTERRUPT_LIST___PADDING2_6 (6)
+/*lint -esym(621, OKL4_OFS_ENV_INTERRUPT_LIST___PADDING3_7) */
+#define OKL4_OFS_ENV_INTERRUPT_LIST___PADDING3_7 (7)
+/*lint -esym(621, OKL4_OFS_ENV_INTERRUPT_LIST_DESCRIPTOR) */
+#define OKL4_OFS_ENV_INTERRUPT_LIST_DESCRIPTOR (8)
+/*lint -esym(621, OKL4_OFS_ENV_INTERRUPT_LIST_INTERRUPT) */
+#define OKL4_OFS_ENV_INTERRUPT_LIST_INTERRUPT (16)
+/**
+ *   Offsets for struct okl4_env_profile_cell
+ **/
+/*lint -esym(621, OKL4_STRUCT_ENV_PROFILE_CELL_SIZE) */
+#define OKL4_STRUCT_ENV_PROFILE_CELL_SIZE (48)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME (0)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_0) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_0 (0)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_1) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_1 (1)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_2) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_2 (2)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_3) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_3 (3)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_4) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_4 (4)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_5) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_5 (5)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_6) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_6 (6)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_7) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_7 (7)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_8) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_8 (8)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_9) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_9 (9)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_10) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_10 (10)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_11) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_11 (11)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_12) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_12 (12)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_13) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_13 (13)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_14) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_14 (14)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_15) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_15 (15)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_16) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_16 (16)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_17) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_17 (17)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_18) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_18 (18)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_19) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_19 (19)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_20) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_20 (20)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_21) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_21 (21)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_22) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_22 (22)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_23) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_23 (23)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_24) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_24 (24)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_25) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_25 (25)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_26) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_26 (26)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_27) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_27 (27)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_28) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_28 (28)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_29) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_29 (29)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_30) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_30 (30)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_31) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_31 (31)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NUM_CORES) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NUM_CORES (32)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL___PADDING0_4) */
+#define OKL4_OFS_ENV_PROFILE_CELL___PADDING0_4 (36)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL___PADDING1_5) */
+#define OKL4_OFS_ENV_PROFILE_CELL___PADDING1_5 (37)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL___PADDING2_6) */
+#define OKL4_OFS_ENV_PROFILE_CELL___PADDING2_6 (38)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL___PADDING3_7) */
+#define OKL4_OFS_ENV_PROFILE_CELL___PADDING3_7 (39)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_CORE) */
+#define OKL4_OFS_ENV_PROFILE_CELL_CORE (40)
+/**
+ *   Offsets for struct okl4_env_profile_cpu
+ **/
+/*lint -esym(621, OKL4_STRUCT_ENV_PROFILE_CPU_SIZE) */
+#define OKL4_STRUCT_ENV_PROFILE_CPU_SIZE (4)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CPU_CAP) */
+#define OKL4_OFS_ENV_PROFILE_CPU_CAP (0)
+/**
+ *   Offsets for struct okl4_env_profile_table
+ **/
+/*lint -esym(621, OKL4_STRUCT_ENV_PROFILE_TABLE_SIZE) */
+#define OKL4_STRUCT_ENV_PROFILE_TABLE_SIZE (16)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_TABLE_NUM_CELL_ENTRIES) */
+#define OKL4_OFS_ENV_PROFILE_TABLE_NUM_CELL_ENTRIES (0)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_TABLE_PCPU_CELL_ENTRY) */
+#define OKL4_OFS_ENV_PROFILE_TABLE_PCPU_CELL_ENTRY (4)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_TABLE_CELLS) */
+#define OKL4_OFS_ENV_PROFILE_TABLE_CELLS (8)
+/**
+ *   Offsets for struct okl4_env_segment
+ **/
+/*lint -esym(621, OKL4_STRUCT_ENV_SEGMENT_SIZE) */
+#define OKL4_STRUCT_ENV_SEGMENT_SIZE (24)
+/*lint -esym(621, OKL4_OFS_ENV_SEGMENT_BASE) */
+#define OKL4_OFS_ENV_SEGMENT_BASE (0)
+/*lint -esym(621, OKL4_OFS_ENV_SEGMENT_SIZE) */
+#define OKL4_OFS_ENV_SEGMENT_SIZE (8)
+/*lint -esym(621, OKL4_OFS_ENV_SEGMENT_CAP_ID) */
+#define OKL4_OFS_ENV_SEGMENT_CAP_ID (16)
+/*lint -esym(621, OKL4_OFS_ENV_SEGMENT_RWX) */
+#define OKL4_OFS_ENV_SEGMENT_RWX (20)
+/**
+ *   Offsets for struct okl4_env_segment_table
+ **/
+/*lint -esym(621, OKL4_STRUCT_ENV_SEGMENT_TABLE_SIZE) */
+#define OKL4_STRUCT_ENV_SEGMENT_TABLE_SIZE (8)
+/*lint -esym(621, OKL4_OFS_ENV_SEGMENT_TABLE_NUM_SEGMENTS) */
+#define OKL4_OFS_ENV_SEGMENT_TABLE_NUM_SEGMENTS (0)
+/*lint -esym(621, OKL4_OFS_ENV_SEGMENT_TABLE___PADDING0_4) */
+#define OKL4_OFS_ENV_SEGMENT_TABLE___PADDING0_4 (4)
+/*lint -esym(621, OKL4_OFS_ENV_SEGMENT_TABLE___PADDING1_5) */
+#define OKL4_OFS_ENV_SEGMENT_TABLE___PADDING1_5 (5)
+/*lint -esym(621, OKL4_OFS_ENV_SEGMENT_TABLE___PADDING2_6) */
+#define OKL4_OFS_ENV_SEGMENT_TABLE___PADDING2_6 (6)
+/*lint -esym(621, OKL4_OFS_ENV_SEGMENT_TABLE___PADDING3_7) */
+#define OKL4_OFS_ENV_SEGMENT_TABLE___PADDING3_7 (7)
+/*lint -esym(621, OKL4_OFS_ENV_SEGMENT_TABLE_SEGMENTS) */
+#define OKL4_OFS_ENV_SEGMENT_TABLE_SEGMENTS (8)
+/**
+ *   Offsets for struct okl4_firmware_segment
+ **/
+/*lint -esym(621, OKL4_STRUCT_FIRMWARE_SEGMENT_SIZE) */
+#define OKL4_STRUCT_FIRMWARE_SEGMENT_SIZE (32)
+/*lint -esym(621, OKL4_OFS_FIRMWARE_SEGMENT_COPY_ADDR) */
+#define OKL4_OFS_FIRMWARE_SEGMENT_COPY_ADDR (0)
+/*lint -esym(621, OKL4_OFS_FIRMWARE_SEGMENT_EXEC_ADDR) */
+#define OKL4_OFS_FIRMWARE_SEGMENT_EXEC_ADDR (8)
+/*lint -esym(621, OKL4_OFS_FIRMWARE_SEGMENT_FILESZ) */
+#define OKL4_OFS_FIRMWARE_SEGMENT_FILESZ (16)
+/*lint -esym(621, OKL4_OFS_FIRMWARE_SEGMENT_MEMSZ_DIFF) */
+#define OKL4_OFS_FIRMWARE_SEGMENT_MEMSZ_DIFF (24)
+/**
+ *   Offsets for struct okl4_firmware_segments_info
+ **/
+/*lint -esym(621, OKL4_STRUCT_FIRMWARE_SEGMENTS_INFO_SIZE) */
+#define OKL4_STRUCT_FIRMWARE_SEGMENTS_INFO_SIZE (8)
+/*lint -esym(621, OKL4_OFS_FIRMWARE_SEGMENTS_INFO_NUM_SEGMENTS) */
+#define OKL4_OFS_FIRMWARE_SEGMENTS_INFO_NUM_SEGMENTS (0)
+/*lint -esym(621, OKL4_OFS_FIRMWARE_SEGMENTS_INFO___PADDING0_4) */
+#define OKL4_OFS_FIRMWARE_SEGMENTS_INFO___PADDING0_4 (4)
+/*lint -esym(621, OKL4_OFS_FIRMWARE_SEGMENTS_INFO___PADDING1_5) */
+#define OKL4_OFS_FIRMWARE_SEGMENTS_INFO___PADDING1_5 (5)
+/*lint -esym(621, OKL4_OFS_FIRMWARE_SEGMENTS_INFO___PADDING2_6) */
+#define OKL4_OFS_FIRMWARE_SEGMENTS_INFO___PADDING2_6 (6)
+/*lint -esym(621, OKL4_OFS_FIRMWARE_SEGMENTS_INFO___PADDING3_7) */
+#define OKL4_OFS_FIRMWARE_SEGMENTS_INFO___PADDING3_7 (7)
+/*lint -esym(621, OKL4_OFS_FIRMWARE_SEGMENTS_INFO_SEGMENTS) */
+#define OKL4_OFS_FIRMWARE_SEGMENTS_INFO_SEGMENTS (8)
+/**
+ *   Offsets for struct okl4_kmmu
+ **/
+/*lint -esym(621, OKL4_STRUCT_KMMU_SIZE) */
+#define OKL4_STRUCT_KMMU_SIZE (4)
+/*lint -esym(621, OKL4_OFS_KMMU_KCAP) */
+#define OKL4_OFS_KMMU_KCAP (0)
+/**
+ *   Offsets for struct okl4_ksp_user_agent
+ **/
+/*lint -esym(621, OKL4_STRUCT_KSP_USER_AGENT_SIZE) */
+#define OKL4_STRUCT_KSP_USER_AGENT_SIZE (8)
+/*lint -esym(621, OKL4_OFS_KSP_USER_AGENT_KCAP) */
+#define OKL4_OFS_KSP_USER_AGENT_KCAP (0)
+/*lint -esym(621, OKL4_OFS_KSP_USER_AGENT_VIRQ) */
+#define OKL4_OFS_KSP_USER_AGENT_VIRQ (4)
+/**
+ *   Offsets for struct okl4_pipe_data
+ **/
+/*lint -esym(621, OKL4_STRUCT_PIPE_DATA_SIZE) */
+#define OKL4_STRUCT_PIPE_DATA_SIZE (8)
+/*lint -esym(621, OKL4_OFS_PIPE_DATA_KCAP) */
+#define OKL4_OFS_PIPE_DATA_KCAP (0)
+/*lint -esym(621, OKL4_OFS_PIPE_DATA_VIRQ) */
+#define OKL4_OFS_PIPE_DATA_VIRQ (4)
+/**
+ *   Offsets for struct okl4_pipe_ep_data
+ **/
+/*lint -esym(621, OKL4_STRUCT_PIPE_EP_DATA_SIZE) */
+#define OKL4_STRUCT_PIPE_EP_DATA_SIZE (16)
+/*lint -esym(621, OKL4_OFS_PIPE_EP_DATA_RX) */
+#define OKL4_OFS_PIPE_EP_DATA_RX (0)
+/*lint -esym(621, OKL4_OFS_PIPE_EP_DATA_RX_KCAP) */
+#define OKL4_OFS_PIPE_EP_DATA_RX_KCAP (0)
+/*lint -esym(621, OKL4_OFS_PIPE_EP_DATA_RX_VIRQ) */
+#define OKL4_OFS_PIPE_EP_DATA_RX_VIRQ (4)
+/*lint -esym(621, OKL4_OFS_PIPE_EP_DATA_TX) */
+#define OKL4_OFS_PIPE_EP_DATA_TX (8)
+/*lint -esym(621, OKL4_OFS_PIPE_EP_DATA_TX_KCAP) */
+#define OKL4_OFS_PIPE_EP_DATA_TX_KCAP (8)
+/*lint -esym(621, OKL4_OFS_PIPE_EP_DATA_TX_VIRQ) */
+#define OKL4_OFS_PIPE_EP_DATA_TX_VIRQ (12)
+/**
+ *   Offsets for struct okl4_link
+ **/
+/*lint -esym(621, OKL4_STRUCT_LINK_SIZE) */
+#define OKL4_STRUCT_LINK_SIZE (80)
+/*lint -esym(621, OKL4_OFS_LINK_NAME) */
+#define OKL4_OFS_LINK_NAME (0)
+/*lint -esym(621, OKL4_OFS_LINK_OPAQUE) */
+#define OKL4_OFS_LINK_OPAQUE (8)
+/*lint -esym(621, OKL4_OFS_LINK_PARTNER_NAME) */
+#define OKL4_OFS_LINK_PARTNER_NAME (16)
+/*lint -esym(621, OKL4_OFS_LINK_ROLE) */
+#define OKL4_OFS_LINK_ROLE (24)
+/*lint -esym(621, OKL4_OFS_LINK___PADDING0_4) */
+#define OKL4_OFS_LINK___PADDING0_4 (28)
+/*lint -esym(621, OKL4_OFS_LINK___PADDING1_5) */
+#define OKL4_OFS_LINK___PADDING1_5 (29)
+/*lint -esym(621, OKL4_OFS_LINK___PADDING2_6) */
+#define OKL4_OFS_LINK___PADDING2_6 (30)
+/*lint -esym(621, OKL4_OFS_LINK___PADDING3_7) */
+#define OKL4_OFS_LINK___PADDING3_7 (31)
+/*lint -esym(621, OKL4_OFS_LINK_TRANSPORT) */
+#define OKL4_OFS_LINK_TRANSPORT (32)
+/*lint -esym(621, OKL4_OFS_LINK_TRANSPORT_TYPE) */
+#define OKL4_OFS_LINK_TRANSPORT_TYPE (72)
+/*lint -esym(621, OKL4_OFS_LINK___PADDING4_4) */
+#define OKL4_OFS_LINK___PADDING4_4 (76)
+/*lint -esym(621, OKL4_OFS_LINK___PADDING5_5) */
+#define OKL4_OFS_LINK___PADDING5_5 (77)
+/*lint -esym(621, OKL4_OFS_LINK___PADDING6_6) */
+#define OKL4_OFS_LINK___PADDING6_6 (78)
+/*lint -esym(621, OKL4_OFS_LINK___PADDING7_7) */
+#define OKL4_OFS_LINK___PADDING7_7 (79)
+/**
+ *   Offsets for struct okl4_links
+ **/
+/*lint -esym(621, OKL4_STRUCT_LINKS_SIZE) */
+#define OKL4_STRUCT_LINKS_SIZE (8)
+/*lint -esym(621, OKL4_OFS_LINKS_NUM_LINKS) */
+#define OKL4_OFS_LINKS_NUM_LINKS (0)
+/*lint -esym(621, OKL4_OFS_LINKS___PADDING0_4) */
+#define OKL4_OFS_LINKS___PADDING0_4 (4)
+/*lint -esym(621, OKL4_OFS_LINKS___PADDING1_5) */
+#define OKL4_OFS_LINKS___PADDING1_5 (5)
+/*lint -esym(621, OKL4_OFS_LINKS___PADDING2_6) */
+#define OKL4_OFS_LINKS___PADDING2_6 (6)
+/*lint -esym(621, OKL4_OFS_LINKS___PADDING3_7) */
+#define OKL4_OFS_LINKS___PADDING3_7 (7)
+/*lint -esym(621, OKL4_OFS_LINKS_LINKS) */
+#define OKL4_OFS_LINKS_LINKS (8)
+/**
+ *   Offsets for struct okl4_machine_info
+ **/
+/*lint -esym(621, OKL4_STRUCT_MACHINE_INFO_SIZE) */
+#define OKL4_STRUCT_MACHINE_INFO_SIZE (24)
+/*lint -esym(621, OKL4_OFS_MACHINE_INFO_L1_CACHE_LINE_SIZE) */
+#define OKL4_OFS_MACHINE_INFO_L1_CACHE_LINE_SIZE (0)
+/*lint -esym(621, OKL4_OFS_MACHINE_INFO_L2_CACHE_LINE_SIZE) */
+#define OKL4_OFS_MACHINE_INFO_L2_CACHE_LINE_SIZE (8)
+/*lint -esym(621, OKL4_OFS_MACHINE_INFO_NUM_CPUS) */
+#define OKL4_OFS_MACHINE_INFO_NUM_CPUS (16)
+/*lint -esym(621, OKL4_OFS_MACHINE_INFO___PADDING0_4) */
+#define OKL4_OFS_MACHINE_INFO___PADDING0_4 (20)
+/*lint -esym(621, OKL4_OFS_MACHINE_INFO___PADDING1_5) */
+#define OKL4_OFS_MACHINE_INFO___PADDING1_5 (21)
+/*lint -esym(621, OKL4_OFS_MACHINE_INFO___PADDING2_6) */
+#define OKL4_OFS_MACHINE_INFO___PADDING2_6 (22)
+/*lint -esym(621, OKL4_OFS_MACHINE_INFO___PADDING3_7) */
+#define OKL4_OFS_MACHINE_INFO___PADDING3_7 (23)
+/**
+ *   Offsets for struct okl4_merged_physpool
+ **/
+/*lint -esym(621, OKL4_STRUCT_MERGED_PHYSPOOL_SIZE) */
+#define OKL4_STRUCT_MERGED_PHYSPOOL_SIZE (16)
+/*lint -esym(621, OKL4_OFS_MERGED_PHYSPOOL_PHYS_ADDR) */
+#define OKL4_OFS_MERGED_PHYSPOOL_PHYS_ADDR (0)
+/*lint -esym(621, OKL4_OFS_MERGED_PHYSPOOL_NUM_SEGMENTS) */
+#define OKL4_OFS_MERGED_PHYSPOOL_NUM_SEGMENTS (8)
+/*lint -esym(621, OKL4_OFS_MERGED_PHYSPOOL___PADDING0_4) */
+#define OKL4_OFS_MERGED_PHYSPOOL___PADDING0_4 (12)
+/*lint -esym(621, OKL4_OFS_MERGED_PHYSPOOL___PADDING1_5) */
+#define OKL4_OFS_MERGED_PHYSPOOL___PADDING1_5 (13)
+/*lint -esym(621, OKL4_OFS_MERGED_PHYSPOOL___PADDING2_6) */
+#define OKL4_OFS_MERGED_PHYSPOOL___PADDING2_6 (14)
+/*lint -esym(621, OKL4_OFS_MERGED_PHYSPOOL___PADDING3_7) */
+#define OKL4_OFS_MERGED_PHYSPOOL___PADDING3_7 (15)
+/*lint -esym(621, OKL4_OFS_MERGED_PHYSPOOL_SEGMENTS) */
+#define OKL4_OFS_MERGED_PHYSPOOL_SEGMENTS (16)
+/**
+ *   Offsets for struct okl4_microvisor_timer
+ **/
+/*lint -esym(621, OKL4_STRUCT_MICROVISOR_TIMER_SIZE) */
+#define OKL4_STRUCT_MICROVISOR_TIMER_SIZE (8)
+/*lint -esym(621, OKL4_OFS_MICROVISOR_TIMER_KCAP) */
+#define OKL4_OFS_MICROVISOR_TIMER_KCAP (0)
+/*lint -esym(621, OKL4_OFS_MICROVISOR_TIMER_VIRQ) */
+#define OKL4_OFS_MICROVISOR_TIMER_VIRQ (4)
+/**
+ *   Offsets for struct okl4_cpu_registers
+ **/
+/*lint -esym(621, OKL4_STRUCT_CPU_REGISTERS_SIZE) */
+#define OKL4_STRUCT_CPU_REGISTERS_SIZE (448)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X) */
+#define OKL4_OFS_CPU_REGISTERS_X (0)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_0) */
+#define OKL4_OFS_CPU_REGISTERS_X_0 (0)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_1) */
+#define OKL4_OFS_CPU_REGISTERS_X_1 (8)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_2) */
+#define OKL4_OFS_CPU_REGISTERS_X_2 (16)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_3) */
+#define OKL4_OFS_CPU_REGISTERS_X_3 (24)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_4) */
+#define OKL4_OFS_CPU_REGISTERS_X_4 (32)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_5) */
+#define OKL4_OFS_CPU_REGISTERS_X_5 (40)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_6) */
+#define OKL4_OFS_CPU_REGISTERS_X_6 (48)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_7) */
+#define OKL4_OFS_CPU_REGISTERS_X_7 (56)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_8) */
+#define OKL4_OFS_CPU_REGISTERS_X_8 (64)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_9) */
+#define OKL4_OFS_CPU_REGISTERS_X_9 (72)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_10) */
+#define OKL4_OFS_CPU_REGISTERS_X_10 (80)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_11) */
+#define OKL4_OFS_CPU_REGISTERS_X_11 (88)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_12) */
+#define OKL4_OFS_CPU_REGISTERS_X_12 (96)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_13) */
+#define OKL4_OFS_CPU_REGISTERS_X_13 (104)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_14) */
+#define OKL4_OFS_CPU_REGISTERS_X_14 (112)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_15) */
+#define OKL4_OFS_CPU_REGISTERS_X_15 (120)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_16) */
+#define OKL4_OFS_CPU_REGISTERS_X_16 (128)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_17) */
+#define OKL4_OFS_CPU_REGISTERS_X_17 (136)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_18) */
+#define OKL4_OFS_CPU_REGISTERS_X_18 (144)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_19) */
+#define OKL4_OFS_CPU_REGISTERS_X_19 (152)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_20) */
+#define OKL4_OFS_CPU_REGISTERS_X_20 (160)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_21) */
+#define OKL4_OFS_CPU_REGISTERS_X_21 (168)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_22) */
+#define OKL4_OFS_CPU_REGISTERS_X_22 (176)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_23) */
+#define OKL4_OFS_CPU_REGISTERS_X_23 (184)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_24) */
+#define OKL4_OFS_CPU_REGISTERS_X_24 (192)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_25) */
+#define OKL4_OFS_CPU_REGISTERS_X_25 (200)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_26) */
+#define OKL4_OFS_CPU_REGISTERS_X_26 (208)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_27) */
+#define OKL4_OFS_CPU_REGISTERS_X_27 (216)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_28) */
+#define OKL4_OFS_CPU_REGISTERS_X_28 (224)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_29) */
+#define OKL4_OFS_CPU_REGISTERS_X_29 (232)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_30) */
+#define OKL4_OFS_CPU_REGISTERS_X_30 (240)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_SP_EL0) */
+#define OKL4_OFS_CPU_REGISTERS_SP_EL0 (248)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_IP) */
+#define OKL4_OFS_CPU_REGISTERS_IP (256)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_CPSR) */
+#define OKL4_OFS_CPU_REGISTERS_CPSR (264)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING0_4) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING0_4 (268)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING1_5) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING1_5 (269)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING2_6) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING2_6 (270)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING3_7) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING3_7 (271)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_SP_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_SP_EL1 (272)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_ELR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_ELR_EL1 (280)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_SPSR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_SPSR_EL1 (288)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_SPSR_ABT) */
+#define OKL4_OFS_CPU_REGISTERS_SPSR_ABT (292)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_SPSR_UND) */
+#define OKL4_OFS_CPU_REGISTERS_SPSR_UND (296)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_SPSR_IRQ) */
+#define OKL4_OFS_CPU_REGISTERS_SPSR_IRQ (300)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_SPSR_FIQ) */
+#define OKL4_OFS_CPU_REGISTERS_SPSR_FIQ (304)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_CSSELR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_CSSELR_EL1 (308)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_SCTLR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_SCTLR_EL1 (312)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_CPACR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_CPACR_EL1 (316)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_TTBR0_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_TTBR0_EL1 (320)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_TTBR1_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_TTBR1_EL1 (328)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_TCR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_TCR_EL1 (336)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_DACR32_EL2) */
+#define OKL4_OFS_CPU_REGISTERS_DACR32_EL2 (344)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_IFSR32_EL2) */
+#define OKL4_OFS_CPU_REGISTERS_IFSR32_EL2 (348)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_ESR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_ESR_EL1 (352)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING4_4) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING4_4 (356)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING5_5) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING5_5 (357)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING6_6) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING6_6 (358)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING7_7) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING7_7 (359)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_FAR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_FAR_EL1 (360)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_PAR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_PAR_EL1 (368)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_MAIR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_MAIR_EL1 (376)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_VBAR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_VBAR_EL1 (384)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_CONTEXTIDR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_CONTEXTIDR_EL1 (392)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING8_4) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING8_4 (396)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING9_5) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING9_5 (397)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING10_6) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING10_6 (398)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING11_7) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING11_7 (399)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_TPIDR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_TPIDR_EL1 (400)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_TPIDRRO_EL0) */
+#define OKL4_OFS_CPU_REGISTERS_TPIDRRO_EL0 (408)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_TPIDR_EL0) */
+#define OKL4_OFS_CPU_REGISTERS_TPIDR_EL0 (416)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_PMCR_EL0) */
+#define OKL4_OFS_CPU_REGISTERS_PMCR_EL0 (424)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING12_4) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING12_4 (428)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING13_5) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING13_5 (429)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING14_6) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING14_6 (430)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING15_7) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING15_7 (431)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_PMCCNTR_EL0) */
+#define OKL4_OFS_CPU_REGISTERS_PMCCNTR_EL0 (432)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_FPEXC32_EL2) */
+#define OKL4_OFS_CPU_REGISTERS_FPEXC32_EL2 (440)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_CNTKCTL_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_CNTKCTL_EL1 (444)
+/**
+ *   Offsets for struct okl4_schedule_profile_data
+ **/
+/*lint -esym(621, OKL4_STRUCT_SCHEDULE_PROFILE_DATA_SIZE) */
+#define OKL4_STRUCT_SCHEDULE_PROFILE_DATA_SIZE (32)
+/*lint -esym(621, OKL4_OFS_SCHEDULE_PROFILE_DATA_TIMESTAMP) */
+#define OKL4_OFS_SCHEDULE_PROFILE_DATA_TIMESTAMP (0)
+/*lint -esym(621, OKL4_OFS_SCHEDULE_PROFILE_DATA_CPU_TIME) */
+#define OKL4_OFS_SCHEDULE_PROFILE_DATA_CPU_TIME (8)
+/*lint -esym(621, OKL4_OFS_SCHEDULE_PROFILE_DATA_CONTEXT_SWITCHES) */
+#define OKL4_OFS_SCHEDULE_PROFILE_DATA_CONTEXT_SWITCHES (16)
+/*lint -esym(621, OKL4_OFS_SCHEDULE_PROFILE_DATA_CPU_MIGRATIONS) */
+#define OKL4_OFS_SCHEDULE_PROFILE_DATA_CPU_MIGRATIONS (20)
+/*lint -esym(621, OKL4_OFS_SCHEDULE_PROFILE_DATA_CPU_HWIRQS) */
+#define OKL4_OFS_SCHEDULE_PROFILE_DATA_CPU_HWIRQS (24)
+/*lint -esym(621, OKL4_OFS_SCHEDULE_PROFILE_DATA_CPU_VIRQS) */
+#define OKL4_OFS_SCHEDULE_PROFILE_DATA_CPU_VIRQS (28)
+/**
+ *   Offsets for struct okl4_shared_buffer
+ **/
+/*lint -esym(621, OKL4_STRUCT_SHARED_BUFFER_SIZE) */
+#define OKL4_STRUCT_SHARED_BUFFER_SIZE (32)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFER_PHYSICAL_BASE) */
+#define OKL4_OFS_SHARED_BUFFER_PHYSICAL_BASE (0)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFER_VIRTMEM_ITEM) */
+#define OKL4_OFS_SHARED_BUFFER_VIRTMEM_ITEM (8)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFER_VIRTMEM_ITEM_RANGE) */
+#define OKL4_OFS_SHARED_BUFFER_VIRTMEM_ITEM_RANGE (8)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFER_VIRTMEM_ITEM_RANGE_BASE) */
+#define OKL4_OFS_SHARED_BUFFER_VIRTMEM_ITEM_RANGE_BASE (8)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFER_VIRTMEM_ITEM_RANGE_SIZE) */
+#define OKL4_OFS_SHARED_BUFFER_VIRTMEM_ITEM_RANGE_SIZE (16)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFER_CAP) */
+#define OKL4_OFS_SHARED_BUFFER_CAP (24)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFER___PADDING0_4) */
+#define OKL4_OFS_SHARED_BUFFER___PADDING0_4 (28)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFER___PADDING1_5) */
+#define OKL4_OFS_SHARED_BUFFER___PADDING1_5 (29)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFER___PADDING2_6) */
+#define OKL4_OFS_SHARED_BUFFER___PADDING2_6 (30)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFER___PADDING3_7) */
+#define OKL4_OFS_SHARED_BUFFER___PADDING3_7 (31)
+/**
+ *   Offsets for struct okl4_shared_buffers_array
+ **/
+/*lint -esym(621, OKL4_STRUCT_SHARED_BUFFERS_ARRAY_SIZE) */
+#define OKL4_STRUCT_SHARED_BUFFERS_ARRAY_SIZE (16)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFERS_ARRAY_BUFFERS) */
+#define OKL4_OFS_SHARED_BUFFERS_ARRAY_BUFFERS (0)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFERS_ARRAY_NUM_BUFFERS) */
+#define OKL4_OFS_SHARED_BUFFERS_ARRAY_NUM_BUFFERS (8)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFERS_ARRAY___PADDING0_4) */
+#define OKL4_OFS_SHARED_BUFFERS_ARRAY___PADDING0_4 (12)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFERS_ARRAY___PADDING1_5) */
+#define OKL4_OFS_SHARED_BUFFERS_ARRAY___PADDING1_5 (13)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFERS_ARRAY___PADDING2_6) */
+#define OKL4_OFS_SHARED_BUFFERS_ARRAY___PADDING2_6 (14)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFERS_ARRAY___PADDING3_7) */
+#define OKL4_OFS_SHARED_BUFFERS_ARRAY___PADDING3_7 (15)
+/**
+ *   Offsets for struct _okl4_tracebuffer_buffer_header
+ **/
+/*lint -esym(621, _OKL4_STRUCT_TRACEBUFFER_BUFFER_HEADER_SIZE) */
+#define _OKL4_STRUCT_TRACEBUFFER_BUFFER_HEADER_SIZE (40)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER_TIMESTAMP) */
+#define _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER_TIMESTAMP (0)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER_WRAP) */
+#define _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER_WRAP (8)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER___PADDING0_4) */
+#define _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER___PADDING0_4 (12)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER___PADDING1_5) */
+#define _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER___PADDING1_5 (13)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER___PADDING2_6) */
+#define _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER___PADDING2_6 (14)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER___PADDING3_7) */
+#define _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER___PADDING3_7 (15)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER_SIZE) */
+#define _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER_SIZE (16)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER_HEAD) */
+#define _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER_HEAD (24)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER_OFFSET) */
+#define _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER_OFFSET (32)
+/**
+ *   Offsets for struct okl4_tracebuffer_env
+ **/
+/*lint -esym(621, OKL4_STRUCT_TRACEBUFFER_ENV_SIZE) */
+#define OKL4_STRUCT_TRACEBUFFER_ENV_SIZE (24)
+/*lint -esym(621, OKL4_OFS_TRACEBUFFER_ENV_VIRT) */
+#define OKL4_OFS_TRACEBUFFER_ENV_VIRT (0)
+/*lint -esym(621, OKL4_OFS_TRACEBUFFER_ENV_VIRT_RANGE) */
+#define OKL4_OFS_TRACEBUFFER_ENV_VIRT_RANGE (0)
+/*lint -esym(621, OKL4_OFS_TRACEBUFFER_ENV_VIRT_RANGE_BASE) */
+#define OKL4_OFS_TRACEBUFFER_ENV_VIRT_RANGE_BASE (0)
+/*lint -esym(621, OKL4_OFS_TRACEBUFFER_ENV_VIRT_RANGE_SIZE) */
+#define OKL4_OFS_TRACEBUFFER_ENV_VIRT_RANGE_SIZE (8)
+/*lint -esym(621, OKL4_OFS_TRACEBUFFER_ENV_VIRQ) */
+#define OKL4_OFS_TRACEBUFFER_ENV_VIRQ (16)
+/*lint -esym(621, OKL4_OFS_TRACEBUFFER_ENV___PADDING0_4) */
+#define OKL4_OFS_TRACEBUFFER_ENV___PADDING0_4 (20)
+/*lint -esym(621, OKL4_OFS_TRACEBUFFER_ENV___PADDING1_5) */
+#define OKL4_OFS_TRACEBUFFER_ENV___PADDING1_5 (21)
+/*lint -esym(621, OKL4_OFS_TRACEBUFFER_ENV___PADDING2_6) */
+#define OKL4_OFS_TRACEBUFFER_ENV___PADDING2_6 (22)
+/*lint -esym(621, OKL4_OFS_TRACEBUFFER_ENV___PADDING3_7) */
+#define OKL4_OFS_TRACEBUFFER_ENV___PADDING3_7 (23)
+/**
+ *   Offsets for struct _okl4_tracebuffer_header
+ **/
+/*lint -esym(621, _OKL4_STRUCT_TRACEBUFFER_HEADER_SIZE) */
+#define _OKL4_STRUCT_TRACEBUFFER_HEADER_SIZE (40)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_HEADER_MAGIC) */
+#define _OKL4_OFS_TRACEBUFFER_HEADER_MAGIC (0)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_HEADER_VERSION) */
+#define _OKL4_OFS_TRACEBUFFER_HEADER_VERSION (4)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_HEADER_ID) */
+#define _OKL4_OFS_TRACEBUFFER_HEADER_ID (8)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_HEADER_NUM_BUFFERS) */
+#define _OKL4_OFS_TRACEBUFFER_HEADER_NUM_BUFFERS (12)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_HEADER_BUFFER_SIZE) */
+#define _OKL4_OFS_TRACEBUFFER_HEADER_BUFFER_SIZE (16)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_HEADER_LOG_MASK) */
+#define _OKL4_OFS_TRACEBUFFER_HEADER_LOG_MASK (24)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_HEADER_ACTIVE_BUFFER) */
+#define _OKL4_OFS_TRACEBUFFER_HEADER_ACTIVE_BUFFER (28)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_HEADER_GRABBED_BUFFER) */
+#define _OKL4_OFS_TRACEBUFFER_HEADER_GRABBED_BUFFER (32)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_HEADER_EMPTY_BUFFERS) */
+#define _OKL4_OFS_TRACEBUFFER_HEADER_EMPTY_BUFFERS (36)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_HEADER_BUFFERS) */
+#define _OKL4_OFS_TRACEBUFFER_HEADER_BUFFERS (40)
+/**
+ *   Offsets for struct okl4_tracepoint_entry_base
+ **/
+/*lint -esym(621, OKL4_STRUCT_TRACEPOINT_ENTRY_BASE_SIZE) */
+#define OKL4_STRUCT_TRACEPOINT_ENTRY_BASE_SIZE (12)
+/*lint -esym(621, OKL4_OFS_TRACEPOINT_ENTRY_BASE_TIME_OFFSET) */
+#define OKL4_OFS_TRACEPOINT_ENTRY_BASE_TIME_OFFSET (0)
+/*lint -esym(621, OKL4_OFS_TRACEPOINT_ENTRY_BASE_MASKS) */
+#define OKL4_OFS_TRACEPOINT_ENTRY_BASE_MASKS (4)
+/*lint -esym(621, OKL4_OFS_TRACEPOINT_ENTRY_BASE_DESCRIPTION) */
+#define OKL4_OFS_TRACEPOINT_ENTRY_BASE_DESCRIPTION (8)
+/**
+ *   Offsets for struct okl4_tracepoint_unpacked_entry
+ **/
+/*lint -esym(621, OKL4_STRUCT_TRACEPOINT_UNPACKED_ENTRY_SIZE) */
+#define OKL4_STRUCT_TRACEPOINT_UNPACKED_ENTRY_SIZE (12)
+/*lint -esym(621, OKL4_OFS_TRACEPOINT_UNPACKED_ENTRY_ENTRY) */
+#define OKL4_OFS_TRACEPOINT_UNPACKED_ENTRY_ENTRY (0)
+/*lint -esym(621, OKL4_OFS_TRACEPOINT_UNPACKED_ENTRY_ENTRY_TIME_OFFSET) */
+#define OKL4_OFS_TRACEPOINT_UNPACKED_ENTRY_ENTRY_TIME_OFFSET (0)
+/*lint -esym(621, OKL4_OFS_TRACEPOINT_UNPACKED_ENTRY_ENTRY_MASKS) */
+#define OKL4_OFS_TRACEPOINT_UNPACKED_ENTRY_ENTRY_MASKS (4)
+/*lint -esym(621, OKL4_OFS_TRACEPOINT_UNPACKED_ENTRY_ENTRY_DESCRIPTION) */
+#define OKL4_OFS_TRACEPOINT_UNPACKED_ENTRY_ENTRY_DESCRIPTION (8)
+/*lint -esym(621, OKL4_OFS_TRACEPOINT_UNPACKED_ENTRY_DATA) */
+#define OKL4_OFS_TRACEPOINT_UNPACKED_ENTRY_DATA (12)
+/**
+ *   Offsets for struct okl4_vclient_info
+ **/
+/*lint -esym(621, OKL4_STRUCT_VCLIENT_INFO_SIZE) */
+#define OKL4_STRUCT_VCLIENT_INFO_SIZE (32)
+/*lint -esym(621, OKL4_OFS_VCLIENT_INFO_AXON_EP) */
+#define OKL4_OFS_VCLIENT_INFO_AXON_EP (0)
+/*lint -esym(621, OKL4_OFS_VCLIENT_INFO_AXON_EP_RX) */
+#define OKL4_OFS_VCLIENT_INFO_AXON_EP_RX (0)
+/*lint -esym(621, OKL4_OFS_VCLIENT_INFO_AXON_EP_RX_KCAP) */
+#define OKL4_OFS_VCLIENT_INFO_AXON_EP_RX_KCAP (0)
+/*lint -esym(621, OKL4_OFS_VCLIENT_INFO_AXON_EP_RX_SEGMENT) */
+#define OKL4_OFS_VCLIENT_INFO_AXON_EP_RX_SEGMENT (4)
+/*lint -esym(621, OKL4_OFS_VCLIENT_INFO_AXON_EP_RX_VIRQ) */
+#define OKL4_OFS_VCLIENT_INFO_AXON_EP_RX_VIRQ (8)
+/*lint -esym(621, OKL4_OFS_VCLIENT_INFO_AXON_EP_TX) */
+#define OKL4_OFS_VCLIENT_INFO_AXON_EP_TX (12)
+/*lint -esym(621, OKL4_OFS_VCLIENT_INFO_AXON_EP_TX_KCAP) */
+#define OKL4_OFS_VCLIENT_INFO_AXON_EP_TX_KCAP (12)
+/*lint -esym(621, OKL4_OFS_VCLIENT_INFO_AXON_EP_TX_SEGMENT) */
+#define OKL4_OFS_VCLIENT_INFO_AXON_EP_TX_SEGMENT (16)
+/*lint -esym(621, OKL4_OFS_VCLIENT_INFO_AXON_EP_TX_VIRQ) */
+#define OKL4_OFS_VCLIENT_INFO_AXON_EP_TX_VIRQ (20)
+/*lint -esym(621, OKL4_OFS_VCLIENT_INFO_OPAQUE) */
+#define OKL4_OFS_VCLIENT_INFO_OPAQUE (24)
+/**
+ *   Offsets for struct okl4_vcpu_entry
+ **/
+/*lint -esym(621, OKL4_STRUCT_VCPU_ENTRY_SIZE) */
+#define OKL4_STRUCT_VCPU_ENTRY_SIZE (24)
+/*lint -esym(621, OKL4_OFS_VCPU_ENTRY_VCPU) */
+#define OKL4_OFS_VCPU_ENTRY_VCPU (0)
+/*lint -esym(621, OKL4_OFS_VCPU_ENTRY_IPI) */
+#define OKL4_OFS_VCPU_ENTRY_IPI (4)
+/*lint -esym(621, OKL4_OFS_VCPU_ENTRY_IRQ) */
+#define OKL4_OFS_VCPU_ENTRY_IRQ (8)
+/*lint -esym(621, OKL4_OFS_VCPU_ENTRY___PADDING0_4) */
+#define OKL4_OFS_VCPU_ENTRY___PADDING0_4 (12)
+/*lint -esym(621, OKL4_OFS_VCPU_ENTRY___PADDING1_5) */
+#define OKL4_OFS_VCPU_ENTRY___PADDING1_5 (13)
+/*lint -esym(621, OKL4_OFS_VCPU_ENTRY___PADDING2_6) */
+#define OKL4_OFS_VCPU_ENTRY___PADDING2_6 (14)
+/*lint -esym(621, OKL4_OFS_VCPU_ENTRY___PADDING3_7) */
+#define OKL4_OFS_VCPU_ENTRY___PADDING3_7 (15)
+/*lint -esym(621, OKL4_OFS_VCPU_ENTRY_STACK_POINTER) */
+#define OKL4_OFS_VCPU_ENTRY_STACK_POINTER (16)
+/**
+ *   Offsets for struct okl4_vcpu_table
+ **/
+/*lint -esym(621, OKL4_STRUCT_VCPU_TABLE_SIZE) */
+#define OKL4_STRUCT_VCPU_TABLE_SIZE (16)
+/*lint -esym(621, OKL4_OFS_VCPU_TABLE_NUM_VCPUS) */
+#define OKL4_OFS_VCPU_TABLE_NUM_VCPUS (0)
+/*lint -esym(621, OKL4_OFS_VCPU_TABLE___PADDING0_4) */
+#define OKL4_OFS_VCPU_TABLE___PADDING0_4 (4)
+/*lint -esym(621, OKL4_OFS_VCPU_TABLE___PADDING1_5) */
+#define OKL4_OFS_VCPU_TABLE___PADDING1_5 (5)
+/*lint -esym(621, OKL4_OFS_VCPU_TABLE___PADDING2_6) */
+#define OKL4_OFS_VCPU_TABLE___PADDING2_6 (6)
+/*lint -esym(621, OKL4_OFS_VCPU_TABLE___PADDING3_7) */
+#define OKL4_OFS_VCPU_TABLE___PADDING3_7 (7)
+/*lint -esym(621, OKL4_OFS_VCPU_TABLE_VCPU) */
+#define OKL4_OFS_VCPU_TABLE_VCPU (8)
+/**
+ *   Offsets for struct okl4_vfp_ctrl_registers
+ **/
+/*lint -esym(621, OKL4_STRUCT_VFP_CTRL_REGISTERS_SIZE) */
+#define OKL4_STRUCT_VFP_CTRL_REGISTERS_SIZE (8)
+/*lint -esym(621, OKL4_OFS_VFP_CTRL_REGISTERS_FPSR) */
+#define OKL4_OFS_VFP_CTRL_REGISTERS_FPSR (0)
+/*lint -esym(621, OKL4_OFS_VFP_CTRL_REGISTERS_FPCR) */
+#define OKL4_OFS_VFP_CTRL_REGISTERS_FPCR (4)
+/**
+ *   Offsets for struct okl4_vfp_register
+ **/
+/*lint -esym(621, OKL4_STRUCT_VFP_REGISTER_SIZE) */
+#define OKL4_STRUCT_VFP_REGISTER_SIZE (16)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES) */
+#define OKL4_OFS_VFP_REGISTER___BYTES (0)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_0) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_0 (0)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_1) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_1 (1)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_2) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_2 (2)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_3) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_3 (3)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_4) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_4 (4)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_5) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_5 (5)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_6) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_6 (6)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_7) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_7 (7)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_8) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_8 (8)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_9) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_9 (9)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_10) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_10 (10)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_11) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_11 (11)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_12) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_12 (12)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_13) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_13 (13)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_14) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_14 (14)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_15) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_15 (15)
+/**
+ *   Offsets for struct okl4_vfp_registers
+ **/
+/*lint -esym(621, OKL4_STRUCT_VFP_REGISTERS_SIZE) */
+#define OKL4_STRUCT_VFP_REGISTERS_SIZE (528)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V0) */
+#define OKL4_OFS_VFP_REGISTERS_V0 (0)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V1) */
+#define OKL4_OFS_VFP_REGISTERS_V1 (16)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V2) */
+#define OKL4_OFS_VFP_REGISTERS_V2 (32)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V3) */
+#define OKL4_OFS_VFP_REGISTERS_V3 (48)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V4) */
+#define OKL4_OFS_VFP_REGISTERS_V4 (64)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V5) */
+#define OKL4_OFS_VFP_REGISTERS_V5 (80)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V6) */
+#define OKL4_OFS_VFP_REGISTERS_V6 (96)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V7) */
+#define OKL4_OFS_VFP_REGISTERS_V7 (112)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V8) */
+#define OKL4_OFS_VFP_REGISTERS_V8 (128)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V9) */
+#define OKL4_OFS_VFP_REGISTERS_V9 (144)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V10) */
+#define OKL4_OFS_VFP_REGISTERS_V10 (160)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V11) */
+#define OKL4_OFS_VFP_REGISTERS_V11 (176)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V12) */
+#define OKL4_OFS_VFP_REGISTERS_V12 (192)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V13) */
+#define OKL4_OFS_VFP_REGISTERS_V13 (208)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V14) */
+#define OKL4_OFS_VFP_REGISTERS_V14 (224)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V15) */
+#define OKL4_OFS_VFP_REGISTERS_V15 (240)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V16) */
+#define OKL4_OFS_VFP_REGISTERS_V16 (256)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V17) */
+#define OKL4_OFS_VFP_REGISTERS_V17 (272)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V18) */
+#define OKL4_OFS_VFP_REGISTERS_V18 (288)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V19) */
+#define OKL4_OFS_VFP_REGISTERS_V19 (304)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V20) */
+#define OKL4_OFS_VFP_REGISTERS_V20 (320)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V21) */
+#define OKL4_OFS_VFP_REGISTERS_V21 (336)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V22) */
+#define OKL4_OFS_VFP_REGISTERS_V22 (352)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V23) */
+#define OKL4_OFS_VFP_REGISTERS_V23 (368)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V24) */
+#define OKL4_OFS_VFP_REGISTERS_V24 (384)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V25) */
+#define OKL4_OFS_VFP_REGISTERS_V25 (400)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V26) */
+#define OKL4_OFS_VFP_REGISTERS_V26 (416)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V27) */
+#define OKL4_OFS_VFP_REGISTERS_V27 (432)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V28) */
+#define OKL4_OFS_VFP_REGISTERS_V28 (448)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V29) */
+#define OKL4_OFS_VFP_REGISTERS_V29 (464)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V30) */
+#define OKL4_OFS_VFP_REGISTERS_V30 (480)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V31) */
+#define OKL4_OFS_VFP_REGISTERS_V31 (496)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_CONTROL) */
+#define OKL4_OFS_VFP_REGISTERS_CONTROL (512)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_CONTROL_FPSR) */
+#define OKL4_OFS_VFP_REGISTERS_CONTROL_FPSR (512)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_CONTROL_FPCR) */
+#define OKL4_OFS_VFP_REGISTERS_CONTROL_FPCR (516)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS___PADDING0_8) */
+#define OKL4_OFS_VFP_REGISTERS___PADDING0_8 (520)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS___PADDING1_9) */
+#define OKL4_OFS_VFP_REGISTERS___PADDING1_9 (521)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS___PADDING2_10) */
+#define OKL4_OFS_VFP_REGISTERS___PADDING2_10 (522)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS___PADDING3_11) */
+#define OKL4_OFS_VFP_REGISTERS___PADDING3_11 (523)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS___PADDING4_12) */
+#define OKL4_OFS_VFP_REGISTERS___PADDING4_12 (524)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS___PADDING5_13) */
+#define OKL4_OFS_VFP_REGISTERS___PADDING5_13 (525)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS___PADDING6_14) */
+#define OKL4_OFS_VFP_REGISTERS___PADDING6_14 (526)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS___PADDING7_15) */
+#define OKL4_OFS_VFP_REGISTERS___PADDING7_15 (527)
+/**
+ *   Offsets for struct okl4_virtmem_pool
+ **/
+/*lint -esym(621, OKL4_STRUCT_VIRTMEM_POOL_SIZE) */
+#define OKL4_STRUCT_VIRTMEM_POOL_SIZE (16)
+/*lint -esym(621, OKL4_OFS_VIRTMEM_POOL_POOL) */
+#define OKL4_OFS_VIRTMEM_POOL_POOL (0)
+/*lint -esym(621, OKL4_OFS_VIRTMEM_POOL_POOL_RANGE) */
+#define OKL4_OFS_VIRTMEM_POOL_POOL_RANGE (0)
+/*lint -esym(621, OKL4_OFS_VIRTMEM_POOL_POOL_RANGE_BASE) */
+#define OKL4_OFS_VIRTMEM_POOL_POOL_RANGE_BASE (0)
+/*lint -esym(621, OKL4_OFS_VIRTMEM_POOL_POOL_RANGE_SIZE) */
+#define OKL4_OFS_VIRTMEM_POOL_POOL_RANGE_SIZE (8)
+/**
+ *   Offsets for struct okl4_virtual_interrupt_lines
+ **/
+/*lint -esym(621, OKL4_STRUCT_VIRTUAL_INTERRUPT_LINES_SIZE) */
+#define OKL4_STRUCT_VIRTUAL_INTERRUPT_LINES_SIZE (16)
+/*lint -esym(621, OKL4_OFS_VIRTUAL_INTERRUPT_LINES_NUM_LINES) */
+#define OKL4_OFS_VIRTUAL_INTERRUPT_LINES_NUM_LINES (0)
+/*lint -esym(621, OKL4_OFS_VIRTUAL_INTERRUPT_LINES___PADDING0_4) */
+#define OKL4_OFS_VIRTUAL_INTERRUPT_LINES___PADDING0_4 (4)
+/*lint -esym(621, OKL4_OFS_VIRTUAL_INTERRUPT_LINES___PADDING1_5) */
+#define OKL4_OFS_VIRTUAL_INTERRUPT_LINES___PADDING1_5 (5)
+/*lint -esym(621, OKL4_OFS_VIRTUAL_INTERRUPT_LINES___PADDING2_6) */
+#define OKL4_OFS_VIRTUAL_INTERRUPT_LINES___PADDING2_6 (6)
+/*lint -esym(621, OKL4_OFS_VIRTUAL_INTERRUPT_LINES___PADDING3_7) */
+#define OKL4_OFS_VIRTUAL_INTERRUPT_LINES___PADDING3_7 (7)
+/*lint -esym(621, OKL4_OFS_VIRTUAL_INTERRUPT_LINES_LINES) */
+#define OKL4_OFS_VIRTUAL_INTERRUPT_LINES_LINES (8)
+/**
+ *   Offsets for struct okl4_vserver_info
+ **/
+/*lint -esym(621, OKL4_STRUCT_VSERVER_INFO_SIZE) */
+#define OKL4_STRUCT_VSERVER_INFO_SIZE (32)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO_CHANNELS) */
+#define OKL4_OFS_VSERVER_INFO_CHANNELS (0)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO_CHANNELS_DATA) */
+#define OKL4_OFS_VSERVER_INFO_CHANNELS_DATA (0)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO_CHANNELS_MAX_MESSAGES) */
+#define OKL4_OFS_VSERVER_INFO_CHANNELS_MAX_MESSAGES (8)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO_CHANNELS___PADDING0_4) */
+#define OKL4_OFS_VSERVER_INFO_CHANNELS___PADDING0_4 (12)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO_CHANNELS___PADDING1_5) */
+#define OKL4_OFS_VSERVER_INFO_CHANNELS___PADDING1_5 (13)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO_CHANNELS___PADDING2_6) */
+#define OKL4_OFS_VSERVER_INFO_CHANNELS___PADDING2_6 (14)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO_CHANNELS___PADDING3_7) */
+#define OKL4_OFS_VSERVER_INFO_CHANNELS___PADDING3_7 (15)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO_CHANNELS_MESSAGE_SIZE) */
+#define OKL4_OFS_VSERVER_INFO_CHANNELS_MESSAGE_SIZE (16)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO_NUM_CLIENTS) */
+#define OKL4_OFS_VSERVER_INFO_NUM_CLIENTS (24)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO___PADDING0_4) */
+#define OKL4_OFS_VSERVER_INFO___PADDING0_4 (28)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO___PADDING1_5) */
+#define OKL4_OFS_VSERVER_INFO___PADDING1_5 (29)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO___PADDING2_6) */
+#define OKL4_OFS_VSERVER_INFO___PADDING2_6 (30)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO___PADDING3_7) */
+#define OKL4_OFS_VSERVER_INFO___PADDING3_7 (31)
+/**
+ *   Offsets for struct okl4_vservices_service_descriptor
+ **/
+/*lint -esym(621, OKL4_STRUCT_VSERVICES_SERVICE_DESCRIPTOR_SIZE) */
+#define OKL4_STRUCT_VSERVICES_SERVICE_DESCRIPTOR_SIZE (24)
+/*lint -esym(621, OKL4_OFS_VSERVICES_SERVICE_DESCRIPTOR_NAME) */
+#define OKL4_OFS_VSERVICES_SERVICE_DESCRIPTOR_NAME (0)
+/*lint -esym(621, OKL4_OFS_VSERVICES_SERVICE_DESCRIPTOR_PROTOCOL) */
+#define OKL4_OFS_VSERVICES_SERVICE_DESCRIPTOR_PROTOCOL (8)
+/*lint -esym(621, OKL4_OFS_VSERVICES_SERVICE_DESCRIPTOR_RESERVED) */
+#define OKL4_OFS_VSERVICES_SERVICE_DESCRIPTOR_RESERVED (16)
+/**
+ *   Offsets for struct okl4_vservices_transport_microvisor
+ **/
+/*lint -esym(621, OKL4_STRUCT_VSERVICES_TRANSPORT_MICROVISOR_SIZE) */
+#define OKL4_STRUCT_VSERVICES_TRANSPORT_MICROVISOR_SIZE (120)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_IS_SERVER) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_IS_SERVER (0)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING0_1) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING0_1 (1)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING1_2) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING1_2 (2)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING2_3) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING2_3 (3)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_TYPE) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_TYPE (4)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_U) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_U (8)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN (72)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN_NUM_LINES) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN_NUM_LINES (72)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN___PADDING0_4) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN___PADDING0_4 (76)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN___PADDING1_5) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN___PADDING1_5 (77)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN___PADDING2_6) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN___PADDING2_6 (78)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN___PADDING3_7) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN___PADDING3_7 (79)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN_LINES) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN_LINES (80)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT (88)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT_NUM_LINES) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT_NUM_LINES (88)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT___PADDING0_4) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT___PADDING0_4 (92)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT___PADDING1_5) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT___PADDING1_5 (93)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT___PADDING2_6) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT___PADDING2_6 (94)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT___PADDING3_7) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT___PADDING3_7 (95)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT_LINES) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT_LINES (96)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_NUM_SERVICES) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_NUM_SERVICES (104)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING3_4) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING3_4 (108)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING4_5) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING4_5 (109)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING5_6) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING5_6 (110)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING6_7) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING6_7 (111)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_SERVICES) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_SERVICES (112)
+/**
+ *   Offsets for struct okl4_vservices_transports
+ **/
+/*lint -esym(621, OKL4_STRUCT_VSERVICES_TRANSPORTS_SIZE) */
+#define OKL4_STRUCT_VSERVICES_TRANSPORTS_SIZE (16)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORTS_NUM_TRANSPORTS) */
+#define OKL4_OFS_VSERVICES_TRANSPORTS_NUM_TRANSPORTS (0)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORTS___PADDING0_4) */
+#define OKL4_OFS_VSERVICES_TRANSPORTS___PADDING0_4 (4)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORTS___PADDING1_5) */
+#define OKL4_OFS_VSERVICES_TRANSPORTS___PADDING1_5 (5)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORTS___PADDING2_6) */
+#define OKL4_OFS_VSERVICES_TRANSPORTS___PADDING2_6 (6)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORTS___PADDING3_7) */
+#define OKL4_OFS_VSERVICES_TRANSPORTS___PADDING3_7 (7)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORTS_TRANSPORTS) */
+#define OKL4_OFS_VSERVICES_TRANSPORTS_TRANSPORTS (8)
+
+#endif /* ASSEMBLY */
+
+#endif /* __AUTO__MICROVISOR_OFFSETS_H__ */
+
diff --git a/include/microvisor/kernel/syscalls.h b/include/microvisor/kernel/syscalls.h
new file mode 100644
index 0000000..fdc2c0d
--- /dev/null
+++ b/include/microvisor/kernel/syscalls.h
@@ -0,0 +1,6114 @@
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* Auto generated - do not modify */
+
+
+
+/** @addtogroup lib_microvisor
+ * @{
+ */
+
+#ifndef __AUTO__USER_SYSCALLS_H__
+#define __AUTO__USER_SYSCALLS_H__
+
+/**
+ * @cond no_doc
+ */
+#if defined(ASSEMBLY)
+#define __hvc_str(x) x
+#else
+#define _hvc_str(x) #x
+#define __hvc_str(x) _hvc_str(x)
+#endif
+#if (defined(__GNUC__) && !defined(__clang__)) && \
+    (__GNUC__ < 4 || ((__GNUC__ == 4) && (__GNUC_MINOR__ < 5)))
+#if defined(__thumb2__)
+#define hvc(i) __hvc_str(.hword 0xf7e0 | (i & 0xf); .hword 0x8000 | (i >> 4) @ HVC)
+#else
+#define hvc(i) __hvc_str(.word 0xe1400070 | (i & 0xf) | (i >> 4 << 8) @ HVC)
+#endif
+#else
+#if defined(__ARM_EABI__)
+#if defined(ASSEMBLY) && !defined(__clang__)
+    .arch_extension virt
+#elif !defined(__clang__)
+__asm__(
+    ".arch_extension virt\n"
+);
+#endif
+#endif
+#define hvc(i) __hvc_str(hvc i)
+#endif
+/**
+ * @endcond
+ */
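+
+/*
+ * Note on the macro above (informational sketch, not part of the generated
+ * interface): with GCC 4.5+ or Clang, hvc(i) simply stringifies the
+ * mnemonic, so
+ *
+ *     "" hvc(5128) "\n\t"
+ *
+ * becomes the inline-asm template "hvc 5128\n\t". The raw .hword/.word
+ * encodings are only a fallback for older GCC assemblers that cannot emit
+ * the HVC mnemonic directly.
+ */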
+
+#if !defined(ASSEMBLY)
+
+#define OKL4_OK OKL4_ERROR_OK
+
+/** @} */
+
+/*
+ * Syscall prototypes.
+ */
+
+/**
+ *
+ * OKL4 Microvisor system call: AXON_PROCESS_RECV
+ *
+ * @param axon_id
+ * @param transfer_limit
+ *
+ * @retval error
+ * @retval send_empty
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_axon_process_recv_return
+_okl4_sys_axon_process_recv(okl4_kcap_t axon_id, okl4_lsize_t transfer_limit)
+{
+    struct _okl4_sys_axon_process_recv_return result;
+
+    register uint32_t r0 asm("r0") = (uint32_t)axon_id;
+    register uint32_t r1 asm("r1") = (uint32_t)(transfer_limit        & 0xffffffff);
+    register uint32_t r2 asm("r2") = (uint32_t)((transfer_limit >> 32) & 0xffffffff);
+    __asm__ __volatile__(
+            ""hvc(5184)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    result.error = (okl4_error_t)(r0);
+    result.send_empty = (okl4_bool_t)(r1);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_axon_process_recv_return
+_okl4_sys_axon_process_recv(okl4_kcap_t axon_id, okl4_lsize_t transfer_limit)
+{
+    struct _okl4_sys_axon_process_recv_return result;
+
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)axon_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)transfer_limit;
+    __asm__ __volatile__(
+            "" hvc(5184) "\n\t"
+            : "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.error = (okl4_error_t)(x0);
+    result.send_empty = (okl4_bool_t)(x1);
+    return result;
+}
+
+#endif
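+
+/*
+ * Illustrative usage (a sketch, not generated code): a cell holding a
+ * capability to an Axon receive endpoint could process pending transfers
+ * as shown below. MY_AXON_CAP and the transfer limit of 16 are
+ * placeholders; real capability values come from the system image or
+ * environment.
+ *
+ *     struct _okl4_sys_axon_process_recv_return ret =
+ *             _okl4_sys_axon_process_recv(MY_AXON_CAP, 16);
+ *
+ * On success ret.error is OKL4_OK; ret.send_empty carries the call's
+ * second return value.
+ */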
+
+/**
+ *
+ * OKL4 Microvisor system call: AXON_SET_HALTED
+ *
+ * @param axon_id
+ * @param halted
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_halted(okl4_kcap_t axon_id, okl4_bool_t halted)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)axon_id;
+    register uint32_t r1 asm("r1") = (uint32_t)halted;
+    __asm__ __volatile__(
+            ""hvc(5186)"\n\t"
+            : "+r"(r0), "+r"(r1)
+            :
+            : "cc", "memory", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_halted(okl4_kcap_t axon_id, okl4_bool_t halted)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)axon_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)halted;
+    __asm__ __volatile__(
+            "" hvc(5186) "\n\t"
+            : "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: AXON_SET_RECV_AREA
+ *
+ * @param axon_id
+ * @param base
+ * @param size
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_recv_area(okl4_kcap_t axon_id, okl4_laddr_t base,
+        okl4_lsize_t size)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)axon_id;
+    register uint32_t r1 asm("r1") = (uint32_t)(base        & 0xffffffff);
+    register uint32_t r2 asm("r2") = (uint32_t)((base >> 32) & 0xffffffff);
+    register uint32_t r3 asm("r3") = (uint32_t)(size        & 0xffffffff);
+    register uint32_t r4 asm("r4") = (uint32_t)((size >> 32) & 0xffffffff);
+    __asm__ __volatile__(
+            ""hvc(5187)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4)
+            :
+            : "cc", "memory", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_recv_area(okl4_kcap_t axon_id, okl4_laddr_t base,
+        okl4_lsize_t size)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)axon_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)base;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)size;
+    __asm__ __volatile__(
+            "" hvc(5187) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: AXON_SET_RECV_QUEUE
+ *
+ * @param axon_id
+ * @param queue
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_recv_queue(okl4_kcap_t axon_id, okl4_laddr_t queue)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)axon_id;
+    register uint32_t r1 asm("r1") = (uint32_t)(queue        & 0xffffffff);
+    register uint32_t r2 asm("r2") = (uint32_t)((queue >> 32) & 0xffffffff);
+    __asm__ __volatile__(
+            ""hvc(5188)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_recv_queue(okl4_kcap_t axon_id, okl4_laddr_t queue)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)axon_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)queue;
+    __asm__ __volatile__(
+            "" hvc(5188) "\n\t"
+            : "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: AXON_SET_RECV_SEGMENT
+ *
+ * @param axon_id
+ * @param segment_id
+ * @param segment_base
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_recv_segment(okl4_kcap_t axon_id, okl4_kcap_t segment_id,
+        okl4_laddr_t segment_base)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)axon_id;
+    register uint32_t r1 asm("r1") = (uint32_t)segment_id;
+    register uint32_t r2 asm("r2") = (uint32_t)(segment_base        & 0xffffffff);
+    register uint32_t r3 asm("r3") = (uint32_t)((segment_base >> 32) & 0xffffffff);
+    __asm__ __volatile__(
+            ""hvc(5189)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3)
+            :
+            : "cc", "memory", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_recv_segment(okl4_kcap_t axon_id, okl4_kcap_t segment_id,
+        okl4_laddr_t segment_base)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)axon_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)segment_id;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)segment_base;
+    __asm__ __volatile__(
+            "" hvc(5189) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: AXON_SET_SEND_AREA
+ *
+ * @param axon_id
+ * @param base
+ * @param size
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_send_area(okl4_kcap_t axon_id, okl4_laddr_t base,
+        okl4_lsize_t size)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)axon_id;
+    register uint32_t r1 asm("r1") = (uint32_t)(base        & 0xffffffff);
+    register uint32_t r2 asm("r2") = (uint32_t)((base >> 32) & 0xffffffff);
+    register uint32_t r3 asm("r3") = (uint32_t)(size        & 0xffffffff);
+    register uint32_t r4 asm("r4") = (uint32_t)((size >> 32) & 0xffffffff);
+    __asm__ __volatile__(
+            ""hvc(5190)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4)
+            :
+            : "cc", "memory", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_send_area(okl4_kcap_t axon_id, okl4_laddr_t base,
+        okl4_lsize_t size)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)axon_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)base;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)size;
+    __asm__ __volatile__(
+            "" hvc(5190) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: AXON_SET_SEND_QUEUE
+ *
+ * @param axon_id
+ * @param queue
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_send_queue(okl4_kcap_t axon_id, okl4_laddr_t queue)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)axon_id;
+    register uint32_t r1 asm("r1") = (uint32_t)(queue        & 0xffffffff);
+    register uint32_t r2 asm("r2") = (uint32_t)((queue >> 32) & 0xffffffff);
+    __asm__ __volatile__(
+            ""hvc(5191)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_send_queue(okl4_kcap_t axon_id, okl4_laddr_t queue)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)axon_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)queue;
+    __asm__ __volatile__(
+            "" hvc(5191) "\n\t"
+            : "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: AXON_SET_SEND_SEGMENT
+ *
+ * @param axon_id
+ * @param segment_id
+ * @param segment_base
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_send_segment(okl4_kcap_t axon_id, okl4_kcap_t segment_id,
+        okl4_laddr_t segment_base)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)axon_id;
+    register uint32_t r1 asm("r1") = (uint32_t)segment_id;
+    register uint32_t r2 asm("r2") = (uint32_t)(segment_base        & 0xffffffff);
+    register uint32_t r3 asm("r3") = (uint32_t)((segment_base >> 32) & 0xffffffff);
+    __asm__ __volatile__(
+            ""hvc(5192)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3)
+            :
+            : "cc", "memory", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_send_segment(okl4_kcap_t axon_id, okl4_kcap_t segment_id,
+        okl4_laddr_t segment_base)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)axon_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)segment_id;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)segment_base;
+    __asm__ __volatile__(
+            "" hvc(5192) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: AXON_TRIGGER_SEND
+ *
+ * @param axon_id
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_trigger_send(okl4_kcap_t axon_id)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)axon_id;
+    __asm__ __volatile__(
+            ""hvc(5185)"\n\t"
+            : "+r"(r0)
+            :
+            : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_trigger_send(okl4_kcap_t axon_id)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)axon_id;
+    __asm__ __volatile__(
+            "" hvc(5185) "\n\t"
+            : "+r"(x0)
+            :
+            : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
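+
+/*
+ * Illustrative sequence (a sketch; the capability and address names are
+ * placeholders): configuring the send side of an Axon chains the calls
+ * defined above before triggering a transfer.
+ *
+ *     okl4_error_t err;
+ *
+ *     err = _okl4_sys_axon_set_send_segment(MY_AXON_CAP, MY_SEGMENT_CAP, 0);
+ *     if (err == OKL4_OK)
+ *         err = _okl4_sys_axon_set_send_queue(MY_AXON_CAP, QUEUE_ADDR);
+ *     if (err == OKL4_OK)
+ *         err = _okl4_sys_axon_set_send_area(MY_AXON_CAP, BUF_ADDR, BUF_SIZE);
+ *     if (err == OKL4_OK)
+ *         err = _okl4_sys_axon_trigger_send(MY_AXON_CAP);
+ */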
+
+/**
+ *
+ * @brief Acknowledge the delivery of an interrupt.
+ *
+ *    @details
+ *    This API returns the number and source of the highest-priority
+ *    enabled, pending and inactive interrupt that is targeted at the
+ *    calling vCPU and has higher priority than the calling vCPU's running
+ *    group priority.
+ *
+ *    The returned interrupt is marked as active, and will not be returned
+ *    again by this function until @ref okl4_sys_interrupt_eoi is invoked
+ *    specifying the same interrupt number and source. The vCPU's running
+ *    interrupt priority is raised to the priority of the returned
+ *    interrupt. This will typically result in the de-assertion of the
+ *    vCPU's virtual IRQ line.
+ *
+ *    If no such interrupt exists, interrupt number 1023 is returned. If the
+ *    returned interrupt number is 16 or greater, the source ID is 0;
+ *    otherwise it is the vCPU ID of the vCPU that raised the interrupt
+ *    (which is always in the same Cell as the caller).
+ *
+ *    @note Invoking this API is equivalent to reading from the GIC CPU
+ *    Interface's Interrupt Acknowledge Register (\p GICC_IAR).
+ *
+ *
+ * @retval irq
+ *    An interrupt line number for the virtual GIC.
+ * @retval source
+ *    The ID of the originating vCPU of a Software-Generated Interrupt.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_interrupt_ack_return
+_okl4_sys_interrupt_ack(void)
+{
+    struct _okl4_sys_interrupt_ack_return result;
+
+    register uint32_t r0 asm("r0");
+    register uint32_t r1 asm("r1");
+    __asm__ __volatile__(
+            ""hvc(5128)"\n\t"
+            : "=r"(r0), "=r"(r1)
+            :
+            : "cc", "memory", "r2", "r3", "r4", "r5"
+            );
+
+
+    result.irq = (okl4_interrupt_number_t)(r0);
+    result.source = (uint8_t)(r1);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_interrupt_ack_return
+_okl4_sys_interrupt_ack(void)
+{
+    struct _okl4_sys_interrupt_ack_return result;
+
+    register okl4_register_t x0 asm("x0");
+    register okl4_register_t x1 asm("x1");
+    __asm__ __volatile__(
+            "" hvc(5128) "\n\t"
+            : "=r"(x0), "=r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.irq = (okl4_interrupt_number_t)(x0);
+    result.source = (uint8_t)(x1);
+    return result;
+}
+
+#endif
+
+/**
+ *
+ * @brief Register a vCPU as the handler of an interrupt.
+ *
+ *    @details
+ *    The Microvisor virtual GIC API permits an interrupt source to be
+ *    dynamically assigned to a specific IRQ number in a Cell or vCPU. An
+ *    interrupt can only be assigned to one IRQ number, and one Cell or
+ *    vCPU, at a time. This operation attaches the interrupt to a vCPU as a
+ *    private interrupt.
+ *
+ *    Interrupt sources are addressed using capabilities. This operation,
+ *    given a capability for an interrupt that is not currently attached to
+ *    any handler, can attach the interrupt at a given unused IRQ number.
+ *    If the IRQ number is between 16 and 31 (the GIC Private Peripheral
+ *    Interrupt range), it will be attached to the specified vCPU; if it is
+ *    between 32 and 1019 (the GIC Shared Peripheral Interrupt range), it
+ *    will return an error.
+ *
+ *    @note The Software Generated Interrupt range, from 0 to 15, is
+ *    reserved and cannot be used to attach interrupt source capabilities.
+ *
+ *    @note In most cases, interrupt sources are attached at system
+ *    construction time by the OK Tool. It is not normally necessary to
+ *    attach an interrupt source before using it.
+ *
+ * @param vcpu_cap
+ *    A virtual CPU capability.
+ * @param irq_cap
+ *    A virtual interrupt capability.
+ * @param irq_num
+ *    An interrupt line number for the virtual GIC.
+ *
+ * @retval error
+ *    The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_attach_private(okl4_kcap_t vcpu_cap, okl4_kcap_t irq_cap,
+        okl4_interrupt_number_t irq_num)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)vcpu_cap;
+    register uint32_t r1 asm("r1") = (uint32_t)irq_cap;
+    register uint32_t r2 asm("r2") = (uint32_t)irq_num;
+    __asm__ __volatile__(
+            ""hvc(5134)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_attach_private(okl4_kcap_t vcpu_cap, okl4_kcap_t irq_cap,
+        okl4_interrupt_number_t irq_num)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)vcpu_cap;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)irq_cap;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)irq_num;
+    __asm__ __volatile__(
+            "" hvc(5134) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
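+
+/*
+ * Illustrative sketch (the capability names are placeholders, not
+ * generated code): attaching an interrupt source to a vCPU as a private
+ * interrupt uses an IRQ number in the PPI range (16-31) described above.
+ *
+ *     okl4_error_t err = _okl4_sys_interrupt_attach_private(MY_VCPU_CAP,
+ *             MY_IRQ_CAP, 16);
+ *
+ * A number outside 16-31 results in an error being returned in err.
+ */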
+
+/**
+ *
+ * @brief Register a Cell (domain) as the handler of an interrupt.
+ *
+ *    @details
+ *    The Microvisor virtual GIC API permits an interrupt source to be
+ *    dynamically assigned to a specific IRQ number in a Cell or vCPU. An
+ *    interrupt can only be assigned to one IRQ number, and one Cell or
+ *    vCPU, at a time. This operation attaches the interrupt to a Cell as a
+ *    shared interrupt.
+ *
+ *    Interrupt sources are addressed using capabilities. This operation,
+ *    given a capability for an interrupt that is not currently attached to
+ *    any handler, can attach the interrupt at a given unused IRQ number.
+ *    If the IRQ number is between 0 and 31 (the GIC SGI or Private
+ *    Peripheral Interrupt range), it will return an error; if it is
+ *    between 32 and 1019 (the GIC Shared Peripheral Interrupt range), it
+ *    will be attached to the specified Cell.
+ *
+ *    @note In most cases, interrupt sources are attached at system
+ *    construction time by the OK Tool. It is not normally necessary to
+ *    attach an interrupt source before using it.
+ *
+ * @param domain_cap
+ *    A domain capability.
+ * @param irq_cap
+ *    A virtual interrupt capability.
+ * @param irq_num
+ *    An interrupt line number for the virtual GIC.
+ *
+ * @retval error
+ *    The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_attach_shared(okl4_kcap_t domain_cap, okl4_kcap_t irq_cap,
+        okl4_interrupt_number_t irq_num)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)domain_cap;
+    register uint32_t r1 asm("r1") = (uint32_t)irq_cap;
+    register uint32_t r2 asm("r2") = (uint32_t)irq_num;
+    __asm__ __volatile__(
+            ""hvc(5135)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_attach_shared(okl4_kcap_t domain_cap, okl4_kcap_t irq_cap,
+        okl4_interrupt_number_t irq_num)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)domain_cap;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)irq_cap;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)irq_num;
+    __asm__ __volatile__(
+            "" hvc(5135) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Unregister an interrupt.
+ *
+ *    @details
+ *    Detach the given interrupt source from its registered handler. The
+ *    interrupt will be deactivated and disabled, and will not be delivered
+ *    again until it is reattached. However, if it is configured in edge
+ *    triggering mode, its pending state will be preserved.
+ *
+ * @param irq_cap
+ *    A virtual interrupt capability.
+ *
+ * @retval error
+ *    The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_detach(okl4_kcap_t irq_cap)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)irq_cap;
+    __asm__ __volatile__(
+            ""hvc(5136)"\n\t"
+            : "+r"(r0)
+            :
+            : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_detach(okl4_kcap_t irq_cap)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)irq_cap;
+    __asm__ __volatile__(
+            "" hvc(5136) "\n\t"
+            : "+r"(x0)
+            :
+            : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Enable the interrupt distributor.
+ *
+ *    @details
+ *    This API enables the interrupt distributor, in the same form as
+ *    writing to the enable bit in (\p GICD_CTLR).
+ *
+ * @param enable
+ *    A boolean value for GIC distributor enable.
+ *
+ * @retval error
+ *    The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_dist_enable(okl4_bool_t enable)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)enable;
+    __asm__ __volatile__(
+            ""hvc(5133)"\n\t"
+            : "+r"(r0)
+            :
+            : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_dist_enable(okl4_bool_t enable)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)enable;
+    __asm__ __volatile__(
+            "" hvc(5133) "\n\t"
+            : "+r"(x0)
+            :
+            : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Signal the end of the interrupt handling routine.
+ *
+ *    @details
+ *    This API informs the virtual GIC that handling for a given interrupt
+ *    has completed. It marks the interrupt as inactive, and decreases the
+ *    running interrupt priority of the calling vCPU. This may cause
+ *    immediate delivery of another interrupt, possibly with the same
+ *    number, if one is enabled and pending.
+ *
+ *    The specified interrupt number and source must match the active
+ *    interrupt that was most recently returned by an
+ *    @ref okl4_sys_interrupt_ack invocation. If multiple interrupts have
+ *    been acknowledged and not yet ended, they must be ended in the
+ *    reverse order of their acknowledgement.
+ *
+ *    @note Invoking this API is equivalent to writing to the GIC CPU
+ *    Interface's End of Interrupt Register (\p GICC_EOIR), with \p EOImode
+ *    set to 0 in \p GICC_CTLR.
+ *
+ * @param irq
+ *    An interrupt line number for the virtual GIC.
+ * @param source
+ *    The ID of the originating vCPU of a Software-Generated Interrupt.
+ *
+ * @retval error
+ *    The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_eoi(okl4_interrupt_number_t irq, uint8_t source)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)irq;
+    register uint32_t r1 asm("r1") = (uint32_t)source;
+    __asm__ __volatile__(
+            ""hvc(5129)"\n\t"
+            : "+r"(r0), "+r"(r1)
+            :
+            : "cc", "memory", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_eoi(okl4_interrupt_number_t irq, uint8_t source)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)irq;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)source;
+    __asm__ __volatile__(
+            "" hvc(5129) "\n\t"
+            : "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
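+/*
+ * Usage sketch (illustrative only): ending the interrupt most recently
+ * acknowledged on this vCPU. acked_irq and acked_source are placeholders for
+ * the values returned by the earlier okl4_sys_interrupt_ack call; nested
+ * acknowledgements must be ended in reverse order.
+ *
+ *     okl4_error_t err = _okl4_sys_interrupt_eoi(acked_irq, acked_source);
+ *     if (err != OKL4_ERROR_OK) {   // OKL4_ERROR_OK assumed success value
+ *         // irq/source did not match the most recent acknowledgement
+ *     }
+ */
+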
+/**
+ *
+ * @brief Retrieve the highest-priority pending interrupt.
+ *
+ *    @details
+ *    This API returns the number and source of the highest-priority enabled,
+ *    pending and inactive interrupt that is targeted at the calling vCPU and
+ *    has higher priority than the calling vCPU's running group priority.
+ *
+ *    If no such interrupt exists, interrupt number 1023 is returned. If the
+ *    returned interrupt number is 16 or greater, the source ID is 0; otherwise
+ *    it is the vCPU ID of the vCPU that raised the interrupt (which is always
+ *    in the same Cell as the caller).
+ *
+ *    @note Invoking this API is equivalent to reading from the GIC CPU
+ *    Interface's Highest Priority Pending Interrupt Register (\p
+ *        GICC_HPPIR).
+ *
+ *
+ * @retval irq
+ *    An interrupt line number for the virtual GIC.
+ * @retval source
+ *    The ID of the originating vCPU of a Software-Generated Interrupt.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_interrupt_get_highest_priority_pending_return
+_okl4_sys_interrupt_get_highest_priority_pending(void)
+{
+    struct _okl4_sys_interrupt_get_highest_priority_pending_return result;
+
+    register uint32_t r0 asm("r0");
+    register uint32_t r1 asm("r1");
+    __asm__ __volatile__(
+            ""hvc(5137)"\n\t"
+            : "=r"(r0), "=r"(r1)
+            :
+            : "cc", "memory", "r2", "r3", "r4", "r5"
+            );
+
+
+    result.irq = (okl4_interrupt_number_t)(r0);
+    result.source = (uint8_t)(r1);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_interrupt_get_highest_priority_pending_return
+_okl4_sys_interrupt_get_highest_priority_pending(void)
+{
+    struct _okl4_sys_interrupt_get_highest_priority_pending_return result;
+
+    register okl4_register_t x0 asm("x0");
+    register okl4_register_t x1 asm("x1");
+    __asm__ __volatile__(
+            "" hvc(5137) "\n\t"
+            : "=r"(x0), "=r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.irq = (okl4_interrupt_number_t)(x0);
+    result.source = (uint8_t)(x1);
+    return result;
+}
+
+#endif
+
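+/*
+ * Usage sketch (illustrative only): polling for the highest-priority
+ * deliverable interrupt without acknowledging it. 1023 is the GIC "no pending
+ * interrupt" number described above.
+ *
+ *     struct _okl4_sys_interrupt_get_highest_priority_pending_return hpp =
+ *             _okl4_sys_interrupt_get_highest_priority_pending();
+ *     if (hpp.irq != 1023U) {
+ *         // hpp.irq is deliverable; hpp.source is only meaningful for SGIs
+ *     }
+ */
+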
+/**
+ *
+ * @brief Fetch the payload flags of a virtual interrupt.
+ *
+ *    @details
+ *    This fetches and clears the accumulated payload flags for a virtual
+ *    interrupt that has been raised by the Microvisor, or by a vCPU invoking
+ *    the @ref okl4_sys_vinterrupt_raise API.
+ *
+ *    If the virtual interrupt is configured for level triggering, clearing the
+ *    accumulated flags by calling this function will also clear the pending
+ *    state of the interrupt.
+ *
+ * @param irq
+ *    An interrupt line number for the virtual GIC.
+ *
+ * @retval error
+ *    The resulting error value.
+ * @retval payload
+ *    Accumulated virtual interrupt payload flags.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_interrupt_get_payload_return
+_okl4_sys_interrupt_get_payload(okl4_interrupt_number_t irq)
+{
+    typedef union {
+        struct uint64 {
+            uint32_t lo;
+            uint32_t hi;
+        } words;
+        uint64_t val;
+    } okl4_uint64_tmp;
+    okl4_uint64_tmp payload_tmp;
+    struct _okl4_sys_interrupt_get_payload_return result;
+
+    register uint32_t r0 asm("r0") = (uint32_t)irq;
+    register uint32_t r1 asm("r1");
+    register uint32_t r2 asm("r2");
+    __asm__ __volatile__(
+            ""hvc(5132)"\n\t"
+            : "=r"(r1), "=r"(r2), "+r"(r0)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    result.error = (okl4_error_t)(r0);
+    payload_tmp.words.lo = r1;
+    payload_tmp.words.hi = r2;
+    result.payload = (okl4_virq_flags_t)(payload_tmp.val);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_interrupt_get_payload_return
+_okl4_sys_interrupt_get_payload(okl4_interrupt_number_t irq)
+{
+    struct _okl4_sys_interrupt_get_payload_return result;
+
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)irq;
+    register okl4_register_t x1 asm("x1");
+    __asm__ __volatile__(
+            "" hvc(5132) "\n\t"
+            : "=r"(x1), "+r"(x0)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.error = (okl4_error_t)(x0);
+    result.payload = (okl4_virq_flags_t)(x1);
+    return result;
+}
+
+#endif
+
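+/*
+ * Usage sketch (illustrative only): fetching and clearing the payload flags of
+ * an acknowledged virtual interrupt. virq_number is a placeholder interrupt
+ * line, and OKL4_ERROR_OK is assumed to be the success value of okl4_error_t.
+ *
+ *     struct _okl4_sys_interrupt_get_payload_return p =
+ *             _okl4_sys_interrupt_get_payload(virq_number);
+ *     if (p.error == OKL4_ERROR_OK) {
+ *         okl4_virq_flags_t flags = p.payload;
+ *         // each set bit is a payload flag raised since the last fetch
+ *     }
+ */
+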
+/**
+ *
+ * @brief Query the number of supported CPUs and interrupt lines.
+ *
+ *    @details
+ *    This API returns the number of CPUs and interrupt lines supported by the
+ *    virtual interrupt controller, in the same form as is found in the GIC
+ *    Distributor's Interrupt Controller Type Register (\p GICD_TYPER), in
+ *    the \p CPUNumber and \p ITLinesNumber fields.
+ *
+ *
+ * @retval cpunumber
+ *    The number of supported target CPUs, minus 1.
+ * @retval itnumber
+ *    The number of supported groups of 32 interrupt lines, minus 1.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_interrupt_limits_return
+_okl4_sys_interrupt_limits(void)
+{
+    struct _okl4_sys_interrupt_limits_return result;
+
+    register uint32_t r0 asm("r0");
+    register uint32_t r1 asm("r1");
+    __asm__ __volatile__(
+            ""hvc(5138)"\n\t"
+            : "=r"(r0), "=r"(r1)
+            :
+            : "cc", "memory", "r2", "r3", "r4", "r5"
+            );
+
+
+    result.cpunumber = (okl4_count_t)(r0);
+    result.itnumber = (okl4_count_t)(r1);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_interrupt_limits_return
+_okl4_sys_interrupt_limits(void)
+{
+    struct _okl4_sys_interrupt_limits_return result;
+
+    register okl4_register_t x0 asm("x0");
+    register okl4_register_t x1 asm("x1");
+    __asm__ __volatile__(
+            "" hvc(5138) "\n\t"
+            : "=r"(x0), "=r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.cpunumber = (okl4_count_t)(x0);
+    result.itnumber = (okl4_count_t)(x1);
+    return result;
+}
+
+#endif
+
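+/*
+ * Usage sketch (illustrative only): converting the GICD_TYPER-style fields
+ * returned by this call into plain counts.
+ *
+ *     struct _okl4_sys_interrupt_limits_return lim =
+ *             _okl4_sys_interrupt_limits();
+ *     okl4_count_t num_cpus = lim.cpunumber + 1U;         // CPUNumber is N - 1
+ *     okl4_count_t num_lines = (lim.itnumber + 1U) * 32U; // 32 lines per group
+ */
+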
+/**
+ *
+ * @brief Disable delivery of an interrupt.
+ *
+ *    @details
+ *    This prevents future delivery of the specified interrupt. It does not
+ *    affect any currently active delivery (that is, end-of-interrupt must
+ *    still be called). It also does not affect the pending state, so it cannot
+ *    cause loss of edge-triggered interrupts.
+ *
+ *    @note Invoking this API is equivalent to writing a single bit to one of
+ *    the GIC Distributor's Interrupt Clear-Enable Registers
+ *    (\p GICD_ICENABLERn).
+ *
+ * @param irq
+ *    An interrupt line number for the virtual GIC.
+ *
+ * @retval error
+ *    The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_mask(okl4_interrupt_number_t irq)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)irq;
+    __asm__ __volatile__(
+            ""hvc(5130)"\n\t"
+            : "+r"(r0)
+            :
+            : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_mask(okl4_interrupt_number_t irq)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)irq;
+    __asm__ __volatile__(
+            "" hvc(5130) "\n\t"
+            : "+r"(x0)
+            :
+            : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
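+/*
+ * Usage sketch (illustrative only): masking an interrupt around a critical
+ * section. device_irq is a placeholder line number; masking does not discard a
+ * pending edge, so no events are lost while the line is disabled.
+ *
+ *     (void)_okl4_sys_interrupt_mask(device_irq);
+ *     // ... update state shared with the interrupt handler ...
+ *     (void)_okl4_sys_interrupt_unmask(device_irq);
+ */
+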
+/**
+ *
+ * @brief Raise a Software-Generated Interrupt.
+ *
+ *    @details
+ *    This allows a Software-Generated Interrupt (with interrupt number between
+ *    0 and 15) to be raised, targeted at a specified set of vCPUs within the
+ *    same Cell. No capability is required, but interrupts cannot be raised to
+ *    other Cells with this API.
+ *
+ *    @note Invoking this API is equivalent to writing to the GIC Distributor's
+ *    Software Generated Interrupt Register (\p GICD_SGIR).
+ *
+ *    @note This API is distinct from the @ref okl4_sys_vinterrupt_raise API,
+ *    which raises a virtual interrupt source which may communicate across
+ *    Cell boundaries, and requires an explicit capability.
+ *
+ * @param sgir
+ *    A description of the Software-Generated Interrupt to raise.
+ *
+ * @retval error
+ *    The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_raise(okl4_gicd_sgir_t sgir)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)sgir;
+    __asm__ __volatile__(
+            ""hvc(5145)"\n\t"
+            : "+r"(r0)
+            :
+            : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_raise(okl4_gicd_sgir_t sgir)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)sgir;
+    __asm__ __volatile__(
+            "" hvc(5145) "\n\t"
+            : "+r"(x0)
+            :
+            : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
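+/*
+ * Usage sketch (illustrative only): raising SGI 7 on vCPU 0 of the calling
+ * Cell. The value below uses the raw GICD_SGIR layout referenced above (target
+ * list in bits 23:16, SGI number in bits 3:0); prefer the generated
+ * okl4_gicd_sgir_t accessors if they are available in this build.
+ *
+ *     okl4_gicd_sgir_t sgir = (okl4_gicd_sgir_t)((1U << 16) | 7U);
+ *     okl4_error_t err = _okl4_sys_interrupt_raise(sgir);
+ */
+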
+/**
+ *
+ * @brief Set the interrupt priority binary point for the calling vCPU.
+ *
+ *    @details
+ *    The GIC splits IRQ priority values into two subfields: the group priority
+ *    and the subpriority. The binary point is the index of the most
+ *    significant bit of the subpriority (that is, one less than the number of
+ *    subpriority bits).
+ *
+ *    An interrupt can preempt another active interrupt only if its group
+ *    priority is higher than the running group priority; the subpriority is
+ *    ignored for this comparison. The subpriority is used to determine which
+ *    of two equal-priority interrupts will be delivered first.
+ *
+ *    @note Invoking this API is equivalent to writing to the GIC CPU
+ *    Interface's Binary Point Register (\p GICC_BPR).
+ *
+ * @param binary_point
+ *    The number of bits in the subpriority field, minus 1.
+ *
+ * @retval error
+ *    The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_set_binary_point(uint8_t binary_point)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)binary_point;
+    __asm__ __volatile__(
+            ""hvc(5139)"\n\t"
+            : "+r"(r0)
+            :
+            : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_set_binary_point(uint8_t binary_point)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)binary_point;
+    __asm__ __volatile__(
+            "" hvc(5139) "\n\t"
+            : "+r"(x0)
+            :
+            : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Change the configuration of an interrupt.
+ *
+ *    @details
+ *    This sets the triggering type of a specified interrupt to either
+ *    edge or level triggering.
+ *
+ *    The specified interrupt must be disabled.
+ *
+ *    @note Some interrupt sources only support one triggering type. In this
+ *    case, calling this API for the interrupt will have no effect.
+ *
+ *    @note Invoking this API is equivalent to writing a single two-bit field
+ *    of one of the GIC Distributor's Interrupt Configuration Registers
+ *    (\p GICD_ICFGRn).
+ *
+ * @param irq
+ *    An interrupt line number for the virtual GIC.
+ * @param icfgr
+ *    The configuration bits for the interrupt line.
+ *
+ * @retval error
+ *    The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_set_config(okl4_interrupt_number_t irq,
+        okl4_gicd_icfgr_t icfgr)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)irq;
+    register uint32_t r1 asm("r1") = (uint32_t)icfgr;
+    __asm__ __volatile__(
+            ""hvc(5140)"\n\t"
+            : "+r"(r0), "+r"(r1)
+            :
+            : "cc", "memory", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_set_config(okl4_interrupt_number_t irq,
+        okl4_gicd_icfgr_t icfgr)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)irq;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)icfgr;
+    __asm__ __volatile__(
+            "" hvc(5140) "\n\t"
+            : "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
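+/*
+ * Usage sketch (illustrative only): switching a disabled interrupt line to
+ * edge triggering. The value 2 is the standard two-bit GICD_ICFGRn encoding
+ * for edge-triggered; prefer the generated okl4_gicd_icfgr_t enumerators if
+ * they are available in this build. device_irq is a placeholder.
+ *
+ *     okl4_error_t err = _okl4_sys_interrupt_set_config(device_irq,
+ *             (okl4_gicd_icfgr_t)2U);
+ */
+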
+/**
+ *
+ * @brief Enable or disable the signaling of interrupts to the vCPU.
+ *
+ *    @details
+ *    Enable or disable the signalling of interrupts by the virtual CPU
+ *    interface to the connected vCPU.
+ *
+ *    @note Interrupt signalling is initially disabled, as required by the GIC
+ *    API specification. This API must therefore be invoked at least once
+ *    before any interrupts will be delivered.
+ *
+ *    @note Invoking this API is equivalent to writing to the GIC CPU
+ *    Interface's Control Register (\p GICC_CTLR) using the "GICv1 without
+ *    Security Extensions or Non-Secure" format, which contains only a single
+ *    enable bit.
+ *
+ * @param enable
+ *    A boolean value for the GIC CPU interface enable.
+ *
+ * @retval error
+ *    The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_set_control(okl4_bool_t enable)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)enable;
+    __asm__ __volatile__(
+            ""hvc(5141)"\n\t"
+            : "+r"(r0)
+            :
+            : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_set_control(okl4_bool_t enable)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)enable;
+    __asm__ __volatile__(
+            "" hvc(5141) "\n\t"
+            : "+r"(x0)
+            :
+            : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Change the delivery priority of an interrupt.
+ *
+ *    @details
+ *    This changes the delivery priority of an interrupt. It has no immediate
+ *    effect on currently active interrupts, but will take effect once the
+ *    interrupt is deactivated.
+ *
+ *    @note The number of significant bits in this value is
+ *    implementation-defined. In this configuration, 4 significant priority
+ *    bits are implemented. The most significant bit is always at the high end
+ *    of the priority byte; that is, at bit 7.
+ *
+ *    @note Smaller values represent higher priority. The highest possible
+ *    priority is 0; the lowest possible priority has all implemented bits set,
+ *    and in this implementation is currently 0xf0.
+ *
+ *    @note Invoking this API is equivalent to writing a single byte of one of
+ *    the GIC Distributor's Interrupt Priority Registers (\p GICD_IPRIORITYn).
+ *
+ * @param irq
+ *    An interrupt line number for the virtual GIC.
+ * @param priority
+ *    A GIC priority value in the range 0-240.
+ *
+ * @retval error
+ *    The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_set_priority(okl4_interrupt_number_t irq, uint8_t priority)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)irq;
+    register uint32_t r1 asm("r1") = (uint32_t)priority;
+    __asm__ __volatile__(
+            ""hvc(5142)"\n\t"
+            : "+r"(r0), "+r"(r1)
+            :
+            : "cc", "memory", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_set_priority(okl4_interrupt_number_t irq, uint8_t priority)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)irq;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)priority;
+    __asm__ __volatile__(
+            "" hvc(5142) "\n\t"
+            : "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
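+/*
+ * Usage sketch (illustrative only): giving a placeholder line device_irq the
+ * second-highest implemented priority. With 4 priority bits at the top of the
+ * byte, usable values step by 0x10 from 0x00 (highest) to 0xf0 (lowest).
+ *
+ *     okl4_error_t err = _okl4_sys_interrupt_set_priority(device_irq, 0x10);
+ */
+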
+/**
+ *
+ * @brief Set the minimum interrupt priority of the calling vCPU.
+ *
+ *    @details
+ *    This API sets the calling vCPU's minimum running interrupt priority.
+ *    Interrupts will only be delivered if they have priority higher than this
+ *    value.
+ *
+ *    @note Higher priority corresponds to a lower priority value; i.e., the
+ *    highest priority value is 0.
+ *
+ *    @note The priority mask is initially set to 0, which prevents all
+ *    interrupt delivery, as required by the GIC API specification. This API
+ *    must therefore be invoked at least once before any interrupts will be
+ *    delivered.
+ *
+ *    @note Invoking this API is equivalent to writing to the GIC CPU
+ *    Interface's Interrupt Priority Mask Register (\p GICC_PMR).
+ *
+ * @param priority_mask
+ *    A GIC priority value in the range 0-240.
+ *
+ * @retval error
+ *    The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_set_priority_mask(uint8_t priority_mask)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)priority_mask;
+    __asm__ __volatile__(
+            ""hvc(5143)"\n\t"
+            : "+r"(r0)
+            :
+            : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_set_priority_mask(uint8_t priority_mask)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)priority_mask;
+    __asm__ __volatile__(
+            "" hvc(5143) "\n\t"
+            : "+r"(x0)
+            :
+            : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
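+/*
+ * Usage sketch (illustrative only): minimal per-vCPU GIC bring-up. Both the
+ * priority mask and the CPU interface enable start in their most restrictive
+ * states, so both calls are required before any interrupt can be delivered;
+ * the distributor must also be enabled once for the Cell.
+ *
+ *     (void)_okl4_sys_interrupt_set_priority_mask(0xf0); // allow everything above
+ *                                                        // the lowest priority
+ *     (void)_okl4_sys_interrupt_set_binary_point(0);     // one subpriority bit
+ *     (void)_okl4_sys_interrupt_set_control((okl4_bool_t)1);
+ *     (void)_okl4_sys_interrupt_dist_enable((okl4_bool_t)1);
+ */
+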
+/**
+ *
+ * @brief Change the delivery targets of a shared interrupt.
+ *
+ *    @details
+ *    This sets the subset of a Cell's vCPUs to which the specified shared
+ *    interrupt (with an interrupt number between 32 and 1019) can be
+ *    delivered. The target vCPUs are specified by an 8-bit bitfield. Note that
+ *    no more than 8 targets are supported by the GIC API, so vCPUs with IDs of
+ *    8 or higher will never receive interrupts.
+ *
+ *    @note The GIC API does not specify how or when the implementation selects
+ *    a target for interrupt delivery. Most hardware implementations deliver to
+ *    all possible targets simultaneously, and then cancel all but the first to
+ *    be acknowledged. In the interests of efficiency, the OKL4 Microvisor does
+ *    not implement this behaviour; instead, it chooses an arbitrary target
+ *    when the interrupt first becomes deliverable.
+ *
+ *    @note Invoking this API is equivalent to writing a single byte of one of
+ *    the GIC Distributor's Interrupt Targets Registers (\p GICD_ITARGETSRn).
+ *
+ * @param irq
+ *    An interrupt line number for the virtual GIC.
+ * @param cpu_mask
+ *    Bitmask of vCPU IDs.
+ *
+ * @retval error
+ *    The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_set_targets(okl4_interrupt_number_t irq, uint8_t cpu_mask)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)irq;
+    register uint32_t r1 asm("r1") = (uint32_t)cpu_mask;
+    __asm__ __volatile__(
+            ""hvc(5144)"\n\t"
+            : "+r"(r0), "+r"(r1)
+            :
+            : "cc", "memory", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_set_targets(okl4_interrupt_number_t irq, uint8_t cpu_mask)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)irq;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)cpu_mask;
+    __asm__ __volatile__(
+            "" hvc(5144) "\n\t"
+            : "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Enable delivery of an interrupt.
+ *
+ *    @details
+ *    This permits delivery of the specified interrupt, once it is pending and
+ *    inactive and has sufficiently high priority.
+ *
+ *    @note Invoking this API is equivalent to writing a single bit to one of
+ *    the GIC Distributor's Interrupt Set-Enable Registers
+ *    (\p GICD_ISENABLERn).
+ *
+ * @param irq
+ *    An interrupt line number for the virtual GIC.
+ *
+ * @retval error
+ *    The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_unmask(okl4_interrupt_number_t irq)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)irq;
+    __asm__ __volatile__(
+            ""hvc(5131)"\n\t"
+            : "+r"(r0)
+            :
+            : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_unmask(okl4_interrupt_number_t irq)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)irq;
+    __asm__ __volatile__(
+            "" hvc(5131) "\n\t"
+            : "+r"(x0)
+            :
+            : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
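+/*
+ * Usage sketch (illustrative only): routing a shared interrupt to vCPU 0 and
+ * enabling it. device_irq is a placeholder line in the shared range (32-1019)
+ * and the target mask carries one bit per vCPU ID.
+ *
+ *     (void)_okl4_sys_interrupt_set_targets(device_irq, 1U << 0);
+ *     (void)_okl4_sys_interrupt_set_priority(device_irq, 0x80);
+ *     okl4_error_t err = _okl4_sys_interrupt_unmask(device_irq);
+ */
+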
+/**
+ *
+ * @brief Enter the kernel interactive debugger.
+ *
+ * @details
+ * This is available on a debug build of the kernel; otherwise the operation
+ * is a no-op.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE void
+_okl4_sys_kdb_interact(void)
+{
+    __asm__ __volatile__(
+            ""hvc(5120)"\n\t"
+            :
+            :
+            : "cc", "memory", "r0", "r1", "r2", "r3", "r4", "r5"
+            );
+
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE void
+_okl4_sys_kdb_interact(void)
+{
+    __asm__ __volatile__(
+            "" hvc(5120) "\n\t"
+            :
+            :
+            : "cc", "memory", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+}
+
+#endif
+
+/**
+ *
+ * @brief Set the debug name of the addressed kernel object.
+ *
+ *    @details
+ *    The debug version of the Microvisor kernel supports naming of kernel
+ *    objects to aid debugging. The object names are visible to external
+ *    debuggers such as a JTAG tool, as well as the in-built interactive kernel
+ *    debugger.
+ *
+ *    The target object may be any Microvisor object for which the caller has
+ *    a capability with the master rights.
+ *
+ *    Debug names may be up to 16 characters long, with four characters stored
+ *    per \p name[x] argument in little-endian order (on a 32-bit machine).
+ *
+ * @param object
+ *    The target kernel object id.
+ * @param name0
+ *    Characters 0-3 of the debug name.
+ * @param name1
+ *    Characters 4-7 of the debug name.
+ * @param name2
+ *    Characters 8-11 of the debug name.
+ * @param name3
+ *    Characters 12-15 of the debug name.
+ *
+ * @retval error
+ *    Resulting error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_kdb_set_object_name(okl4_kcap_t object, uint32_t name0, uint32_t name1
+        , uint32_t name2, uint32_t name3)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)object;
+    register uint32_t r1 asm("r1") = (uint32_t)name0;
+    register uint32_t r2 asm("r2") = (uint32_t)name1;
+    register uint32_t r3 asm("r3") = (uint32_t)name2;
+    register uint32_t r4 asm("r4") = (uint32_t)name3;
+    __asm__ __volatile__(
+            ""hvc(5121)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4)
+            :
+            : "cc", "memory", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_kdb_set_object_name(okl4_kcap_t object, uint32_t name0, uint32_t name1
+        , uint32_t name2, uint32_t name3)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)object;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)name0;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)name1;
+    register okl4_register_t x3 asm("x3") = (okl4_register_t)name2;
+    register okl4_register_t x4 asm("x4") = (okl4_register_t)name3;
+    __asm__ __volatile__(
+            "" hvc(5121) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4)
+            :
+            : "cc", "memory", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
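+/*
+ * Usage sketch (illustrative only): naming a kernel object for the debugger.
+ * The snippet packs up to 16 ASCII characters into the four little-endian
+ * words expected by the call; vcpu_cap is a placeholder master capability and
+ * memcpy requires <string.h>.
+ *
+ *     const char name[16] = "linux-vcpu0";
+ *     uint32_t w[4];
+ *     memcpy(w, name, sizeof(w));
+ *     (void)_okl4_sys_kdb_set_object_name(vcpu_cap, w[0], w[1], w[2], w[3]);
+ */
+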
+/**
+ *
+ * @brief Call a kernel support package (KSP) defined interface.
+ *
+ *    @details
+ *    The KSP procedure call allows the caller to interact with customer
+ *    specific functions provided by the kernel support package. The caller
+ *    must possess a capability with the appropriate rights to a KSP agent in
+ *    order to call this interface.
+ *
+ *    The remaining parameters provided are passed directly to the KSP without
+ *    any inspection.
+ *
+ *    The KSP can return an error code and up to three return words.
+ *
+ * @param agent
+ *    The target KSP agent
+ * @param operation
+ *    The operation to be performed
+ * @param arg0
+ *    An argument for the operation
+ * @param arg1
+ *    An argument for the operation
+ * @param arg2
+ *    An argument for the operation
+ * @param arg3
+ *    An argument for the operation
+ *
+ * @retval error
+ *    The resulting error
+ * @retval ret0
+ *    A return value for the operation
+ * @retval ret1
+ *    A return value for the operation
+ * @retval ret2
+ *    A return value for the operation
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_ksp_procedure_call_return
+_okl4_sys_ksp_procedure_call(okl4_kcap_t agent, okl4_ksp_arg_t operation,
+        okl4_ksp_arg_t arg0, okl4_ksp_arg_t arg1, okl4_ksp_arg_t arg2,
+        okl4_ksp_arg_t arg3)
+{
+    struct _okl4_sys_ksp_procedure_call_return result;
+
+    register uint32_t r0 asm("r0") = (uint32_t)agent;
+    register uint32_t r1 asm("r1") = (uint32_t)operation;
+    register uint32_t r2 asm("r2") = (uint32_t)arg0;
+    register uint32_t r3 asm("r3") = (uint32_t)arg1;
+    register uint32_t r4 asm("r4") = (uint32_t)arg2;
+    register uint32_t r5 asm("r5") = (uint32_t)arg3;
+    __asm__ __volatile__(
+            ""hvc(5197)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4), "+r"(r5)
+            :
+            : "cc", "memory"
+            );
+
+
+    result.error = (okl4_error_t)(r0);
+    result.ret0 = (okl4_ksp_arg_t)(r1);
+    result.ret1 = (okl4_ksp_arg_t)(r2);
+    result.ret2 = (okl4_ksp_arg_t)(r3);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_ksp_procedure_call_return
+_okl4_sys_ksp_procedure_call(okl4_kcap_t agent, okl4_ksp_arg_t operation,
+        okl4_ksp_arg_t arg0, okl4_ksp_arg_t arg1, okl4_ksp_arg_t arg2,
+        okl4_ksp_arg_t arg3)
+{
+    struct _okl4_sys_ksp_procedure_call_return result;
+
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)agent;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)operation;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)arg0;
+    register okl4_register_t x3 asm("x3") = (okl4_register_t)arg1;
+    register okl4_register_t x4 asm("x4") = (okl4_register_t)arg2;
+    register okl4_register_t x5 asm("x5") = (okl4_register_t)arg3;
+    __asm__ __volatile__(
+            "" hvc(5197) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4), "+r"(x5)
+            :
+            : "cc", "memory", "x6", "x7"
+            );
+
+
+    result.error = (okl4_error_t)(x0);
+    result.ret0 = (okl4_ksp_arg_t)(x1);
+    result.ret1 = (okl4_ksp_arg_t)(x2);
+    result.ret2 = (okl4_ksp_arg_t)(x3);
+    return result;
+}
+
+#endif
+
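+/*
+ * Usage sketch (illustrative only): invoking a KSP-defined operation. The
+ * agent capability, the operation code and the argument meanings are all
+ * defined by the platform's kernel support package; ksp_agent_cap and the
+ * operation value 1 below are placeholders.
+ *
+ *     struct _okl4_sys_ksp_procedure_call_return r =
+ *             _okl4_sys_ksp_procedure_call(ksp_agent_cap, 1, 0, 0, 0, 0);
+ *     if (r.error == OKL4_ERROR_OK) {   // OKL4_ERROR_OK assumed success value
+ *         // r.ret0, r.ret1 and r.ret2 carry KSP-specific return words
+ *     }
+ */
+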
+/**
+ *
+ * @brief Attach a segment to an MMU.
+ *
+ *    @details
+ *    Before any mappings based on a segment can be established in the MMU's
+ *    address space, the segment must be attached to the MMU. Attaching a
+ *    segment serves to reference count the segment, preventing modifications
+ *    from being made to the segment.
+ *
+ *    A segment may be attached to an MMU multiple times, at the same or
+ *    different index. Each time a segment is attached to an MMU, the
+ *    attachment reference count is incremented.
+ *
+ *    Attaching segments to an MMU is also important for VMMU objects in that
+ *    the segment attachment index is used as a segment reference in the
+ *    virtual page table format.
+ *
+ * @param mmu_id
+ *    The target MMU id.
+ * @param segment_id
+ *    The target segment id.
+ * @param index
+ *    Index into the MMU's segment attachment table.
+ * @param perms
+ *    Mapping permissions.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_attach_segment(okl4_kcap_t mmu_id, okl4_kcap_t segment_id,
+        okl4_count_t index, okl4_page_perms_t perms)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+    register uint32_t r1 asm("r1") = (uint32_t)segment_id;
+    register uint32_t r2 asm("r2") = (uint32_t)index;
+    register uint32_t r3 asm("r3") = (uint32_t)perms;
+    __asm__ __volatile__(
+            ""hvc(5152)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3)
+            :
+            : "cc", "memory", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_attach_segment(okl4_kcap_t mmu_id, okl4_kcap_t segment_id,
+        okl4_count_t index, okl4_page_perms_t perms)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)segment_id;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)index;
+    register okl4_register_t x3 asm("x3") = (okl4_register_t)perms;
+    __asm__ __volatile__(
+            "" hvc(5152) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3)
+            :
+            : "cc", "memory", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
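+/*
+ * Usage sketch (illustrative only): attaching a segment to an MMU before
+ * mapping from it. guest_mmu_cap and ram_segment_cap are placeholder
+ * capabilities, attachment index 0 is assumed to be free, and
+ * OKL4_PAGE_PERMS_RWX is assumed to be a generated okl4_page_perms_t value.
+ *
+ *     okl4_error_t err = _okl4_sys_mmu_attach_segment(guest_mmu_cap,
+ *             ram_segment_cap, 0, OKL4_PAGE_PERMS_RWX);
+ */
+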
+/**
+ *
+ * @brief Detach a segment from an MMU.
+ *
+ *    @details
+ *    A segment can be detached from an MMU or vMMU, causing its reference
+ *    count to decrease. When the reference count reaches zero, the attachment
+ *    is removed and all mappings in the MMU object relating to the segment
+ *    are removed.
+ *
+ *    The detach-segment operation is potentially a long running operation,
+ *    especially if invoked on a vMMU.
+ *
+ * @param mmu_id
+ *    The target MMU id.
+ * @param index
+ *    Index into the MMU's segment attachment table.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_detach_segment(okl4_kcap_t mmu_id, okl4_count_t index)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+    register uint32_t r1 asm("r1") = (uint32_t)index;
+    __asm__ __volatile__(
+            ""hvc(5153)"\n\t"
+            : "+r"(r0), "+r"(r1)
+            :
+            : "cc", "memory", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_detach_segment(okl4_kcap_t mmu_id, okl4_count_t index)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)index;
+    __asm__ __volatile__(
+            "" hvc(5153) "\n\t"
+            : "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Flush a range of virtual addresses from an MMU.
+ *
+ *    @details
+ *    This causes the kernel to remove all mappings covering the specified
+ *    virtual address range.
+ *
+ *    @note The size of the range must be a multiple of 1MB and the
+ *    starting virtual address must be 1MB aligned.
+ *    There is no support for flushing at a finer granularity.
+ *    If a fine grained flush is required, the caller should use the
+ *    @ref _okl4_sys_mmu_unmap_page operation.
+ *
+ *    The flush-range operation is potentially a long running operation.
+ *
+ * @param mmu_id
+ *    The target MMU id.
+ * @param vaddr
+ *    The starting virtual address of the range.
+ *    (Must be 1MB aligned)
+ * @param size
+ *    Size of the range. (Must be a multiple of 1MB)
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_flush_range(okl4_kcap_t mmu_id, okl4_laddr_tr_t vaddr,
+        okl4_lsize_tr_t size)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+    register uint32_t r1 asm("r1") = (uint32_t)vaddr;
+    register uint32_t r2 asm("r2") = (uint32_t)size;
+    __asm__ __volatile__(
+            ""hvc(5154)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_flush_range(okl4_kcap_t mmu_id, okl4_laddr_tr_t vaddr,
+        okl4_lsize_tr_t size)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)vaddr;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)size;
+    __asm__ __volatile__(
+            "" hvc(5154) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Flush a range of virtual addresses from an MMU.
+ *
+ *    @details
+ *    This causes the kernel to remove all mappings covering the specified
+ *    virtual address range.
+ *
+ *    @note The size of the range must be a multiple of 1MB and the
+ *    starting virtual address must be 1MB aligned.
+ *    There is no support for flushing at a finer granularity.
+ *    If a fine grained flush is required, the caller should use the
+ *    @ref _okl4_sys_mmu_unmap_page operation.
+ *
+ *    The flush-range operation is potentially a long running operation.
+ *
+ * @param mmu_id
+ *    The target MMU id.
+ * @param laddr_pn
+ *    Logical address page-number of the mapping.
+ * @param count_pn
+ *    The number of consecutive pages to map/unmap.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_flush_range_pn(okl4_kcap_t mmu_id, okl4_laddr_pn_t laddr_pn,
+        okl4_lsize_pn_t count_pn)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+    register uint32_t r1 asm("r1") = (uint32_t)laddr_pn;
+    register uint32_t r2 asm("r2") = (uint32_t)count_pn;
+    __asm__ __volatile__(
+            ""hvc(5155)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_flush_range_pn(okl4_kcap_t mmu_id, okl4_laddr_pn_t laddr_pn,
+        okl4_lsize_pn_t count_pn)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)laddr_pn;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)count_pn;
+    __asm__ __volatile__(
+            "" hvc(5155) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Lookup a virtual address in the MMU.
+ *
+ *    @details
+ *    This operation performs a lookup in the MMU's pagetable for a mapping
+ *    derived from a specified segment.
+ *
+ *    If a mapping is found that is derived from the specified segment, the
+ *    operation will return the segment offset, size and the page attributes
+ *    associated with the mapping.
+ *
+ *    If a segment_index value of OKL4_KCAP_INVALID is specified, the operation
+ *    will search for a matching segment in the MMU.
+ *
+ * @param mmu_id
+ *    The target MMU id.
+ * @param vaddr
+ *    Virtual address of the mapping.
+ * @param segment_index
+ *    Index into the MMU's segment attachment table.
+ *
+ * @retval error
+ *    Resulting error.
+ * @retval offset
+ *    Offset into the segment.
+ * @retval size
+ *    Size of the mapping, in bytes. Size will be one of the supported machine
+ *    page-sizes. If a segment search was performed, the lower 10 bits of size
+ *    contain the returned segment-index.
+ * @retval page_attr
+ *    Mapping attributes.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_mmu_lookup_page_return
+_okl4_sys_mmu_lookup_page(okl4_kcap_t mmu_id, okl4_laddr_tr_t vaddr,
+        okl4_count_t segment_index)
+{
+    typedef union {
+        struct uint64 {
+            uint32_t lo;
+            uint32_t hi;
+        } words;
+        uint64_t val;
+    } okl4_uint64_tmp;
+    okl4_uint64_tmp size_tmp;
+    struct _okl4_sys_mmu_lookup_page_return result;
+
+    register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+    register uint32_t r1 asm("r1") = (uint32_t)vaddr;
+    register uint32_t r2 asm("r2") = (uint32_t)segment_index;
+    register uint32_t r3 asm("r3");
+    register uint32_t r4 asm("r4");
+    __asm__ __volatile__(
+            ""hvc(5156)"\n\t"
+            : "=r"(r3), "=r"(r4), "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r5"
+            );
+
+
+    result.error = (okl4_error_t)(r0);
+    result.offset = (okl4_psize_tr_t)(r1);
+    size_tmp.words.lo = r2;
+    size_tmp.words.hi = r3;
+    result.size = (okl4_mmu_lookup_size_t)(size_tmp.val);
+    result.page_attr = (_okl4_page_attribute_t)(r4);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_mmu_lookup_page_return
+_okl4_sys_mmu_lookup_page(okl4_kcap_t mmu_id, okl4_laddr_tr_t vaddr,
+        okl4_count_t segment_index)
+{
+    struct _okl4_sys_mmu_lookup_page_return result;
+
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)vaddr;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)segment_index;
+    register okl4_register_t x3 asm("x3");
+    __asm__ __volatile__(
+            "" hvc(5156) "\n\t"
+            : "=r"(x3), "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.error = (okl4_error_t)(x0);
+    result.offset = (okl4_psize_tr_t)(x1);
+    result.size = (okl4_mmu_lookup_size_t)(x2);
+    result.page_attr = (_okl4_page_attribute_t)(x3);
+    return result;
+}
+
+#endif
+
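+/*
+ * Usage sketch (illustrative only): looking up an existing mapping with a
+ * segment search. guest_mmu_cap and vaddr are placeholders; as noted above,
+ * when OKL4_KCAP_INVALID is passed the lower 10 bits of the returned size hold
+ * the matching attachment index.
+ *
+ *     struct _okl4_sys_mmu_lookup_page_return lk =
+ *             _okl4_sys_mmu_lookup_page(guest_mmu_cap, vaddr, OKL4_KCAP_INVALID);
+ *     if (lk.error == OKL4_ERROR_OK) {
+ *         okl4_count_t seg_index = (okl4_count_t)(lk.size & 0x3ffU);
+ *         // lk.offset is the offset into that segment; the page size is in
+ *         // the remaining bits of lk.size
+ *     }
+ */
+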
+/**
+ *
+ * @brief Lookup a virtual address in the MMU.
+ *
+ *    @details
+ *    This operation performs a lookup in the MMU's pagetable for a mapping
+ *    derived from a specified segment.
+ *
+ *    If a mapping is found that is derived from the specified segment, the
+ *    operation will return the segment offset, size and the page attributes
+ *    associated with the mapping.
+ *
+ *    If a segment_index value of OKL4_KCAP_INVALID is specified, the operation
+ *    will search for a matching segment in the MMU.
+ *
+ * @param mmu_id
+ *    The target MMU id.
+ * @param laddr_pn
+ *    Logical address page-number of the mapping.
+ * @param segment_index
+ *    Index into the MMU's segment attachment table.
+ *
+ * @retval segment_index
+ *    Index into the MMU's segment attachment table, or error.
+ * @retval offset_pn
+ *    Offset into the segment in units of page numbers.
+ * @retval count_pn
+ *    The number of consecutive pages to map/unmap.
+ * @retval page_attr
+ *    Mapping attributes.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_mmu_lookup_pn_return
+_okl4_sys_mmu_lookup_pn(okl4_kcap_t mmu_id, okl4_laddr_pn_t laddr_pn,
+        okl4_count_t segment_index)
+{
+    struct _okl4_sys_mmu_lookup_pn_return result;
+
+    register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+    register uint32_t r1 asm("r1") = (uint32_t)laddr_pn;
+    register uint32_t r2 asm("r2") = (uint32_t)segment_index;
+    register uint32_t r3 asm("r3");
+    __asm__ __volatile__(
+            ""hvc(5157)"\n\t"
+            : "=r"(r3), "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r4", "r5"
+            );
+
+
+    result.segment_index = (okl4_mmu_lookup_index_t)(r0);
+    result.offset_pn = (okl4_psize_pn_t)(r1);
+    result.count_pn = (okl4_lsize_pn_t)(r2);
+    result.page_attr = (_okl4_page_attribute_t)(r3);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_mmu_lookup_pn_return
+_okl4_sys_mmu_lookup_pn(okl4_kcap_t mmu_id, okl4_laddr_pn_t laddr_pn,
+        okl4_count_t segment_index)
+{
+    struct _okl4_sys_mmu_lookup_pn_return result;
+
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)laddr_pn;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)segment_index;
+    register okl4_register_t x3 asm("x3");
+    __asm__ __volatile__(
+            "" hvc(5157) "\n\t"
+            : "=r"(x3), "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.segment_index = (okl4_mmu_lookup_index_t)(x0);
+    result.offset_pn = (okl4_psize_pn_t)(x1);
+    result.count_pn = (okl4_lsize_pn_t)(x2);
+    result.page_attr = (_okl4_page_attribute_t)(x3);
+    return result;
+}
+
+#endif
+
+/**
+ *
+ * @brief Create a mapping at a virtual address in the MMU.
+ *
+ *    @details
+ *    This operation installs a new mapping into the MMU at the specified
+ *    virtual address. The mapping's physical address is determined from the
+ *    specified segment and offset, and the mapping's size and attributes are
+ *    provided in \p size and \p page_attr.
+ *
+ * @param mmu_id
+ *    The target MMU id.
+ * @param vaddr
+ *    Virtual address of the mapping.
+ * @param segment_index
+ *    Index into the MMU's segment attachment table.
+ * @param offset
+ *    Offset into the segment.
+ * @param size
+ *    Size of the mapping, in bytes.
+ * @param page_attr
+ *    Mapping attributes.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_map_page(okl4_kcap_t mmu_id, okl4_laddr_tr_t vaddr,
+        okl4_count_t segment_index, okl4_psize_tr_t offset, okl4_lsize_tr_t size
+        , _okl4_page_attribute_t page_attr)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+    register uint32_t r1 asm("r1") = (uint32_t)vaddr;
+    register uint32_t r2 asm("r2") = (uint32_t)segment_index;
+    register uint32_t r3 asm("r3") = (uint32_t)offset;
+    register uint32_t r4 asm("r4") = (uint32_t)size;
+    register uint32_t r5 asm("r5") = (uint32_t)page_attr;
+    __asm__ __volatile__(
+            ""hvc(5158)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4), "+r"(r5)
+            :
+            : "cc", "memory"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_map_page(okl4_kcap_t mmu_id, okl4_laddr_tr_t vaddr,
+        okl4_count_t segment_index, okl4_psize_tr_t offset, okl4_lsize_tr_t size
+        , _okl4_page_attribute_t page_attr)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)vaddr;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)segment_index;
+    register okl4_register_t x3 asm("x3") = (okl4_register_t)offset;
+    register okl4_register_t x4 asm("x4") = (okl4_register_t)size;
+    register okl4_register_t x5 asm("x5") = (okl4_register_t)page_attr;
+    __asm__ __volatile__(
+            "" hvc(5158) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4), "+r"(x5)
+            :
+            : "cc", "memory", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
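+/*
+ * Usage sketch (illustrative only): creating and later removing a 4KB mapping
+ * from a previously attached segment (attachment index 0). guest_mmu_cap,
+ * vaddr and page_attr are placeholders; page_attr would normally be built with
+ * the generated _okl4_page_attribute_t accessors.
+ *
+ *     okl4_error_t err = _okl4_sys_mmu_map_page(guest_mmu_cap, vaddr, 0,
+ *             0, 0x1000, page_attr);                  // offset 0, size 4KB
+ *     // ... use the mapping ...
+ *     err = _okl4_sys_mmu_unmap_page(guest_mmu_cap, vaddr, 0x1000);
+ */
+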
+/**
+ *
+ * @brief Create a mapping at a virtual address in the MMU.
+ *
+ *    @details
+ *    This operation installs a new mapping into the MMU at the specified
+ *    virtual address. The mapping's physical address is determined from the
+ *    specified segment and offset, and the mapping's size and attributes are
+ *    provided in \p count_pn and \p page_attr.
+ *
+ * @param mmu_id
+ *    The target MMU id.
+ * @param laddr_pn
+ *    Logical address page-number of the mapping.
+ * @param segment_index
+ *    Index into the MMU's segment attachment table.
+ * @param segment_offset_pn
+ *    Offset into the segment in units of page numbers.
+ * @param count_pn
+ *    The number of consecutive pages to map/unmap.
+ * @param page_attr
+ *    Mapping attributes.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_map_pn(okl4_kcap_t mmu_id, okl4_laddr_pn_t laddr_pn,
+        okl4_count_t segment_index, okl4_psize_pn_t segment_offset_pn,
+        okl4_lsize_pn_t count_pn, _okl4_page_attribute_t page_attr)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+    register uint32_t r1 asm("r1") = (uint32_t)laddr_pn;
+    register uint32_t r2 asm("r2") = (uint32_t)segment_index;
+    register uint32_t r3 asm("r3") = (uint32_t)segment_offset_pn;
+    register uint32_t r4 asm("r4") = (uint32_t)count_pn;
+    register uint32_t r5 asm("r5") = (uint32_t)page_attr;
+    __asm__ __volatile__(
+            ""hvc(5159)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4), "+r"(r5)
+            :
+            : "cc", "memory"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_map_pn(okl4_kcap_t mmu_id, okl4_laddr_pn_t laddr_pn,
+        okl4_count_t segment_index, okl4_psize_pn_t segment_offset_pn,
+        okl4_lsize_pn_t count_pn, _okl4_page_attribute_t page_attr)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)laddr_pn;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)segment_index;
+    register okl4_register_t x3 asm("x3") = (okl4_register_t)segment_offset_pn;
+    register okl4_register_t x4 asm("x4") = (okl4_register_t)count_pn;
+    register okl4_register_t x5 asm("x5") = (okl4_register_t)page_attr;
+    __asm__ __volatile__(
+            "" hvc(5159) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4), "+r"(x5)
+            :
+            : "cc", "memory", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Remove a mapping at a virtual address in the MMU.
+ *
+ *    @details
+ *    This operation removes a mapping from the MMU at the specified virtual
+ *    address. The size and address specified must match the size and base
+ *    address of the mapping being removed.
+ *
+ * @param mmu_id
+ *    The target MMU id.
+ * @param vaddr
+ *    Virtual address of the mapping.
+ * @param size
+ *    Size of the mapping, in bytes.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_unmap_page(okl4_kcap_t mmu_id, okl4_laddr_tr_t vaddr,
+        okl4_lsize_tr_t size)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+    register uint32_t r1 asm("r1") = (uint32_t)vaddr;
+    register uint32_t r2 asm("r2") = (uint32_t)size;
+    __asm__ __volatile__(
+            ""hvc(5160)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_unmap_page(okl4_kcap_t mmu_id, okl4_laddr_tr_t vaddr,
+        okl4_lsize_tr_t size)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)vaddr;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)size;
+    __asm__ __volatile__(
+            "" hvc(5160) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Remove a mapping at a virtual address in the MMU.
+ *
+ *    @details
+ *    This operation removes a mapping from the MMU at the specified virtual
+ *    address. The size and address specified must match the size and base
+ *    address of the mapping being removed.
+ *
+ * @param mmu_id
+ *    The target MMU id.
+ * @param laddr_pn
+ *    Logical address page-number of the mapping.
+ * @param count_pn
+ *    The number of consecutive pages to map/unmap.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_unmap_pn(okl4_kcap_t mmu_id, okl4_laddr_pn_t laddr_pn,
+        okl4_lsize_pn_t count_pn)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+    register uint32_t r1 asm("r1") = (uint32_t)laddr_pn;
+    register uint32_t r2 asm("r2") = (uint32_t)count_pn;
+    __asm__ __volatile__(
+            ""hvc(5161)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_unmap_pn(okl4_kcap_t mmu_id, okl4_laddr_pn_t laddr_pn,
+        okl4_lsize_pn_t count_pn)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)laddr_pn;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)count_pn;
+    __asm__ __volatile__(
+            "" hvc(5161) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Update the cache attributes of a mapping in the MMU.
+ *
+ * @param mmu_id
+ *    The target MMU id.
+ * @param vaddr
+ *    Virtual address of the mapping.
+ * @param segment_index
+ *    Index into the MMU's segment attachment table.
+ * @param size
+ *    Size of the mapping, in bytes.
+ * @param attrs
+ *    Mapping cache attributes.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_update_page_attrs(okl4_kcap_t mmu_id, okl4_laddr_tr_t vaddr,
+        okl4_count_t segment_index, okl4_lsize_tr_t size,
+        okl4_page_cache_t attrs)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+    register uint32_t r1 asm("r1") = (uint32_t)vaddr;
+    register uint32_t r2 asm("r2") = (uint32_t)segment_index;
+    register uint32_t r3 asm("r3") = (uint32_t)size;
+    register uint32_t r4 asm("r4") = (uint32_t)attrs;
+    __asm__ __volatile__(
+            ""hvc(5162)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4)
+            :
+            : "cc", "memory", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_update_page_attrs(okl4_kcap_t mmu_id, okl4_laddr_tr_t vaddr,
+        okl4_count_t segment_index, okl4_lsize_tr_t size,
+        okl4_page_cache_t attrs)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)vaddr;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)segment_index;
+    register okl4_register_t x3 asm("x3") = (okl4_register_t)size;
+    register okl4_register_t x4 asm("x4") = (okl4_register_t)attrs;
+    __asm__ __volatile__(
+            "" hvc(5162) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4)
+            :
+            : "cc", "memory", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Update the page permissions of a mapping in the MMU.
+ *
+ * @param mmu_id
+ *    The target MMU id.
+ * @param vaddr
+ *    Virtual address of the mapping.
+ * @param segment_index
+ *    Index into the MMU's segment attachment table.
+ * @param size
+ *    Size of the mapping, in bytes.
+ * @param perms
+ *    Mapping permissions.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_update_page_perms(okl4_kcap_t mmu_id, okl4_laddr_tr_t vaddr,
+        okl4_count_t segment_index, okl4_lsize_tr_t size,
+        okl4_page_perms_t perms)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+    register uint32_t r1 asm("r1") = (uint32_t)vaddr;
+    register uint32_t r2 asm("r2") = (uint32_t)segment_index;
+    register uint32_t r3 asm("r3") = (uint32_t)size;
+    register uint32_t r4 asm("r4") = (uint32_t)perms;
+    __asm__ __volatile__(
+            ""hvc(5163)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4)
+            :
+            : "cc", "memory", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_update_page_perms(okl4_kcap_t mmu_id, okl4_laddr_tr_t vaddr,
+        okl4_count_t segment_index, okl4_lsize_tr_t size,
+        okl4_page_perms_t perms)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)vaddr;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)segment_index;
+    register okl4_register_t x3 asm("x3") = (okl4_register_t)size;
+    register okl4_register_t x4 asm("x4") = (okl4_register_t)perms;
+    __asm__ __volatile__(
+            "" hvc(5163) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4)
+            :
+            : "cc", "memory", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Update the cache attributes of a mapping in the MMU.
+ *
+ * @param mmu_id
+ *    The target MMU id.
+ * @param laddr_pn
+ *    Logical address page-number of the mapping.
+ * @param segment_index
+ *    Index into the MMU's segment attachment table.
+ * @param count_pn
+ *    The number of consecutive pages to map/unmap.
+ * @param attrs
+ *    Mapping cache attributes.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_update_pn_attrs(okl4_kcap_t mmu_id, okl4_laddr_pn_t laddr_pn,
+        okl4_count_t segment_index, okl4_lsize_pn_t count_pn,
+        okl4_page_cache_t attrs)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+    register uint32_t r1 asm("r1") = (uint32_t)laddr_pn;
+    register uint32_t r2 asm("r2") = (uint32_t)segment_index;
+    register uint32_t r3 asm("r3") = (uint32_t)count_pn;
+    register uint32_t r4 asm("r4") = (uint32_t)attrs;
+    __asm__ __volatile__(
+            ""hvc(5164)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4)
+            :
+            : "cc", "memory", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_update_pn_attrs(okl4_kcap_t mmu_id, okl4_laddr_pn_t laddr_pn,
+        okl4_count_t segment_index, okl4_lsize_pn_t count_pn,
+        okl4_page_cache_t attrs)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)laddr_pn;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)segment_index;
+    register okl4_register_t x3 asm("x3") = (okl4_register_t)count_pn;
+    register okl4_register_t x4 asm("x4") = (okl4_register_t)attrs;
+    __asm__ __volatile__(
+            "" hvc(5164) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4)
+            :
+            : "cc", "memory", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Update the page permissions of a mapping in the MMU.
+ *
+ * @param mmu_id
+ *    The target MMU id.
+ * @param laddr_pn
+ *    Logical address page-number of the mapping.
+ * @param segment_index
+ *    Index into the MMU's segment attachment table.
+ * @param count_pn
+ *    The number of consecutive pages to map/unmap.
+ * @param perms
+ *    Mapping permissions.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_update_pn_perms(okl4_kcap_t mmu_id, okl4_laddr_pn_t laddr_pn,
+        okl4_count_t segment_index, okl4_lsize_pn_t count_pn,
+        okl4_page_perms_t perms)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+    register uint32_t r1 asm("r1") = (uint32_t)laddr_pn;
+    register uint32_t r2 asm("r2") = (uint32_t)segment_index;
+    register uint32_t r3 asm("r3") = (uint32_t)count_pn;
+    register uint32_t r4 asm("r4") = (uint32_t)perms;
+    __asm__ __volatile__(
+            ""hvc(5165)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4)
+            :
+            : "cc", "memory", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_update_pn_perms(okl4_kcap_t mmu_id, okl4_laddr_pn_t laddr_pn,
+        okl4_count_t segment_index, okl4_lsize_pn_t count_pn,
+        okl4_page_perms_t perms)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)laddr_pn;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)segment_index;
+    register okl4_register_t x3 asm("x3") = (okl4_register_t)count_pn;
+    register okl4_register_t x4 asm("x4") = (okl4_register_t)perms;
+    __asm__ __volatile__(
+            "" hvc(5165) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4)
+            :
+            : "cc", "memory", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * A NULL system-call for latency measurement.
+ *
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_performance_null_syscall(void)
+{
+    register uint32_t r0 asm("r0");
+    __asm__ __volatile__(
+            ""hvc(5198)"\n\t"
+            : "=r"(r0)
+            :
+            : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_performance_null_syscall(void)
+{
+    register okl4_register_t x0 asm("x0");
+    __asm__ __volatile__(
+            "" hvc(5198) "\n\t"
+            : "=r"(x0)
+            :
+            : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
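+/*
+ * Illustrative usage sketch only (not part of the generated interface and
+ * guarded out of compilation): the null system call does no work, so the
+ * cost of N invocations, timed with any suitable clock around this loop,
+ * approximates N hypercall round trips.
+ */
+#if 0
+static void
+example_null_syscall_loop(unsigned int n)
+{
+    unsigned int i;
+
+    for (i = 0U; i < n; i++) {
+        (void)_okl4_sys_performance_null_syscall();
+    }
+}
+#endif
+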
+/**
+ *
+ * Control a pipe, including reset, ready and halt functionality.
+ *
+ * @param pipe_id
+ *    The capability identifier of the pipe.
+ * @param control
+ *    The state control argument.
+ *
+ * @retval error
+ *    The returned error code.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_pipe_control(okl4_kcap_t pipe_id, okl4_pipe_control_t control)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)pipe_id;
+    register uint32_t r1 asm("r1") = (uint32_t)control;
+    __asm__ __volatile__(
+            ""hvc(5146)"\n\t"
+            : "+r"(r0), "+r"(r1)
+            :
+            : "cc", "memory", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_pipe_control(okl4_kcap_t pipe_id, okl4_pipe_control_t control)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)pipe_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)control;
+    __asm__ __volatile__(
+            "" hvc(5146) "\n\t"
+            : "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * Receive a message from a microvisor pipe.
+ *
+ * @param pipe_id
+ *    The capability identifier of the pipe.
+ * @param buf_size
+ *    Size of the receive buffer.
+ * @param data
+ *    Pointer to receive buffer.
+ *
+ * @retval error
+ *    The returned error code.
+ * @retval size
+ *    Size of the received message.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_pipe_recv_return
+_okl4_sys_pipe_recv(okl4_kcap_t pipe_id, okl4_vsize_t buf_size, uint8_t *data)
+{
+    typedef union {
+        struct uint64 {
+            uint32_t lo;
+            uint32_t hi;
+        } words;
+        uint64_t val;
+    } okl4_uint64_tmp;
+    okl4_uint64_tmp size_tmp;
+    struct _okl4_sys_pipe_recv_return result;
+
+    register uint32_t r0 asm("r0") = (uint32_t)pipe_id;
+    register uint32_t r1 asm("r1") = (uint32_t)buf_size;
+    register uint32_t r2 asm("r2") = (uint32_t)(uintptr_t)data;
+    __asm__ __volatile__(
+            ""hvc(5147)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    result.error = (okl4_error_t)(r0);
+    size_tmp.words.lo = r1;
+    size_tmp.words.hi = r2;
+    result.size = (okl4_ksize_t)(size_tmp.val);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_pipe_recv_return
+_okl4_sys_pipe_recv(okl4_kcap_t pipe_id, okl4_vsize_t buf_size, uint8_t *data)
+{
+    struct _okl4_sys_pipe_recv_return result;
+
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)pipe_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)buf_size;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)(uintptr_t)data;
+    __asm__ __volatile__(
+            "" hvc(5147) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.error = (okl4_error_t)(x0);
+    result.size = (okl4_ksize_t)(x1);
+    return result;
+}
+
+#endif
+
+/**
+ *
+ * Send a message to a microvisor pipe.
+ *
+ * @param pipe_id
+ *    The capability identifier of the pipe.
+ * @param size
+ *    Size of the message to send.
+ * @param data
+ *    Pointer to the message payload to send.
+ *
+ * @retval error
+ *    The returned error code.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_pipe_send(okl4_kcap_t pipe_id, okl4_vsize_t size, const uint8_t *data)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)pipe_id;
+    register uint32_t r1 asm("r1") = (uint32_t)size;
+    register uint32_t r2 asm("r2") = (uint32_t)(uintptr_t)data;
+    __asm__ __volatile__(
+            ""hvc(5148)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_pipe_send(okl4_kcap_t pipe_id, okl4_vsize_t size, const uint8_t *data)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)pipe_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)size;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)(uintptr_t)data;
+    __asm__ __volatile__(
+            "" hvc(5148) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
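+/*
+ * Illustrative usage sketch only (guarded out of compilation): receive a
+ * message from a pipe and echo it back. The pipe capability is
+ * hypothetical, and OKL4_ERROR_OK is assumed to be the SDK's success
+ * value.
+ */
+#if 0
+static okl4_error_t
+example_pipe_echo(okl4_kcap_t pipe_kcap)
+{
+    uint8_t buf[64];
+    struct _okl4_sys_pipe_recv_return rx;
+
+    /* Receive up to sizeof(buf) bytes from the pipe. */
+    rx = _okl4_sys_pipe_recv(pipe_kcap, (okl4_vsize_t)sizeof(buf), buf);
+    if (rx.error != OKL4_ERROR_OK) {
+        return rx.error;
+    }
+
+    /* Send the received payload back on the same pipe. */
+    return _okl4_sys_pipe_send(pipe_kcap, (okl4_vsize_t)rx.size, buf);
+}
+#endif
+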
+/**
+ *
+ * @brief Waive the current vCPU's priority.
+ *
+ *    @details
+ *    This operation allows a vCPU to change its waived priority. A vCPU
+ *    has both a base priority and its current priority.
+ *
+ *    The base priority is the statically assigned maximum priority that a
+ *    vCPU has been given. The current priority is the priority used for
+ *    system scheduling and is limited to the range of zero to the base
+ *    priority.
+ *
+ *    The `waive-priority` operation allows a vCPU to set its current
+ *    priority and is normally used to reduce its current priority. This
+ *    allows a vCPU to perform work at a lower system priority, and
+ *    supports the interleaved scheduling feature.
+ *
+ *    A vCPU's priority is restored to its base priority whenever an
+ *    interrupt that has the vCPU registered as its handler is raised.
+ *    This allows interrupt handling and guest operating systems to return
+ *    to the base priority to potentially do higher priority work.
+ *
+ *    After calling this interface an immediate reschedule will be
+ *    performed.
+ *
+ * @param priority
+ *    New vCPU priority.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_priority_waive(okl4_priority_t priority)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)priority;
+    __asm__ __volatile__(
+            ""hvc(5151)"\n\t"
+            : "+r"(r0)
+            :
+            : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_priority_waive(okl4_priority_t priority)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)priority;
+    __asm__ __volatile__(
+            "" hvc(5151) "\n\t"
+            : "+r"(x0)
+            :
+            : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
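+/*
+ * Illustrative usage sketch only (guarded out of compilation): waive the
+ * current vCPU's priority before doing background work. Priority value 0
+ * is assumed to be the lowest priority, and do_background_work() is a
+ * hypothetical placeholder.
+ */
+#if 0
+static void
+example_background_work(void)
+{
+    /* Drop the current priority so other runnable vCPUs are preferred. */
+    (void)_okl4_sys_priority_waive((okl4_priority_t)0);
+
+    do_background_work(); /* hypothetical low-priority work */
+
+    /* The base priority is restored automatically when an interrupt that
+     * has this vCPU registered as its handler is raised (see above). */
+}
+#endif
+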
+/**
+ *
+ * OKL4 Microvisor system call: REMOTE_GET_REGISTER
+ *
+ * @param target
+ * @param reg_and_set
+ *
+ * @retval reg_w0
+ * @retval reg_w1
+ * @retval reg_w2
+ * @retval reg_w3
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_remote_get_register_return
+_okl4_sys_remote_get_register(okl4_kcap_t target,
+        okl4_register_and_set_t reg_and_set)
+{
+    struct _okl4_sys_remote_get_register_return result;
+
+    register uint32_t r0 asm("r0") = (uint32_t)target;
+    register uint32_t r1 asm("r1") = (uint32_t)reg_and_set;
+    register uint32_t r2 asm("r2");
+    register uint32_t r3 asm("r3");
+    register uint32_t r4 asm("r4");
+    __asm__ __volatile__(
+            ""hvc(5200)"\n\t"
+            : "=r"(r2), "=r"(r3), "=r"(r4), "+r"(r0), "+r"(r1)
+            :
+            : "cc", "memory", "r5"
+            );
+
+
+    result.reg_w0 = (uint32_t)(r0);
+    result.reg_w1 = (uint32_t)(r1);
+    result.reg_w2 = (uint32_t)(r2);
+    result.reg_w3 = (uint32_t)(r3);
+    result.error = (okl4_error_t)(r4);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_remote_get_register_return
+_okl4_sys_remote_get_register(okl4_kcap_t target,
+        okl4_register_and_set_t reg_and_set)
+{
+    struct _okl4_sys_remote_get_register_return result;
+
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)target;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)reg_and_set;
+    register okl4_register_t x2 asm("x2");
+    register okl4_register_t x3 asm("x3");
+    register okl4_register_t x4 asm("x4");
+    __asm__ __volatile__(
+            "" hvc(5200) "\n\t"
+            : "=r"(x2), "=r"(x3), "=r"(x4), "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x5", "x6", "x7"
+            );
+
+
+    result.reg_w0 = (uint32_t)(x0);
+    result.reg_w1 = (uint32_t)(x1);
+    result.reg_w2 = (uint32_t)(x2);
+    result.reg_w3 = (uint32_t)(x3);
+    result.error = (okl4_error_t)(x4);
+    return result;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: REMOTE_GET_REGISTERS
+ *
+ * @param target
+ * @param set
+ * @param regs
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_remote_get_registers(okl4_kcap_t target, okl4_register_set_t set,
+        void *regs)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)target;
+    register uint32_t r1 asm("r1") = (uint32_t)set;
+    register uint32_t r2 asm("r2") = (uint32_t)(uintptr_t)regs;
+    __asm__ __volatile__(
+            ""hvc(5201)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_remote_get_registers(okl4_kcap_t target, okl4_register_set_t set,
+        void *regs)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)target;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)set;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)(uintptr_t)regs;
+    __asm__ __volatile__(
+            "" hvc(5201) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: REMOTE_READ_MEMORY32
+ *
+ * @param target
+ * @param address
+ *
+ * @retval data
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_remote_read_memory32_return
+_okl4_sys_remote_read_memory32(okl4_kcap_t target, okl4_laddr_t address)
+{
+    struct _okl4_sys_remote_read_memory32_return result;
+
+    register uint32_t r0 asm("r0") = (uint32_t)target;
+    register uint32_t r1 asm("r1") = (uint32_t)(address        & 0xffffffff);
+    register uint32_t r2 asm("r2") = (uint32_t)((address >> 32) & 0xffffffff);
+    __asm__ __volatile__(
+            ""hvc(5202)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    result.data = (uint32_t)(r0);
+    result.error = (okl4_error_t)(r1);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_remote_read_memory32_return
+_okl4_sys_remote_read_memory32(okl4_kcap_t target, okl4_laddr_t address)
+{
+    struct _okl4_sys_remote_read_memory32_return result;
+
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)target;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)address;
+    __asm__ __volatile__(
+            "" hvc(5202) "\n\t"
+            : "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.data = (uint32_t)(x0);
+    result.error = (okl4_error_t)(x1);
+    return result;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: REMOTE_SET_REGISTER
+ *
+ * @param target
+ * @param reg_and_set
+ * @param reg_w0
+ * @param reg_w1
+ * @param reg_w2
+ * @param reg_w3
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_remote_set_register(okl4_kcap_t target,
+        okl4_register_and_set_t reg_and_set, uint32_t reg_w0, uint32_t reg_w1,
+        uint32_t reg_w2, uint32_t reg_w3)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)target;
+    register uint32_t r1 asm("r1") = (uint32_t)reg_and_set;
+    register uint32_t r2 asm("r2") = (uint32_t)reg_w0;
+    register uint32_t r3 asm("r3") = (uint32_t)reg_w1;
+    register uint32_t r4 asm("r4") = (uint32_t)reg_w2;
+    register uint32_t r5 asm("r5") = (uint32_t)reg_w3;
+    __asm__ __volatile__(
+            ""hvc(5203)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4), "+r"(r5)
+            :
+            : "cc", "memory"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_remote_set_register(okl4_kcap_t target,
+        okl4_register_and_set_t reg_and_set, uint32_t reg_w0, uint32_t reg_w1,
+        uint32_t reg_w2, uint32_t reg_w3)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)target;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)reg_and_set;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)reg_w0;
+    register okl4_register_t x3 asm("x3") = (okl4_register_t)reg_w1;
+    register okl4_register_t x4 asm("x4") = (okl4_register_t)reg_w2;
+    register okl4_register_t x5 asm("x5") = (okl4_register_t)reg_w3;
+    __asm__ __volatile__(
+            "" hvc(5203) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4), "+r"(x5)
+            :
+            : "cc", "memory", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: REMOTE_SET_REGISTERS
+ *
+ * @param target
+ * @param set
+ * @param regs
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_remote_set_registers(okl4_kcap_t target, okl4_register_set_t set,
+        void *regs)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)target;
+    register uint32_t r1 asm("r1") = (uint32_t)set;
+    register uint32_t r2 asm("r2") = (uint32_t)(uintptr_t)regs;
+    __asm__ __volatile__(
+            ""hvc(5204)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_remote_set_registers(okl4_kcap_t target, okl4_register_set_t set,
+        void *regs)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)target;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)set;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)(uintptr_t)regs;
+    __asm__ __volatile__(
+            "" hvc(5204) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: REMOTE_WRITE_MEMORY32
+ *
+ * @param target
+ * @param address
+ * @param data
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_remote_write_memory32(okl4_kcap_t target, okl4_laddr_t address,
+        uint32_t data)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)target;
+    register uint32_t r1 asm("r1") = (uint32_t)(address        & 0xffffffff);
+    register uint32_t r2 asm("r2") = (uint32_t)((address >> 32) & 0xffffffff);
+    register uint32_t r3 asm("r3") = (uint32_t)data;
+    __asm__ __volatile__(
+            ""hvc(5205)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3)
+            :
+            : "cc", "memory", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_remote_write_memory32(okl4_kcap_t target, okl4_laddr_t address,
+        uint32_t data)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)target;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)address;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)data;
+    __asm__ __volatile__(
+            "" hvc(5205) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * Retrieve suspend status.
+ *
+ * @param scheduler_id
+ *    The scheduler capability identifier.
+ *
+ * @retval error
+ *    Resulting error.
+ * @retval power_suspend_version
+ *    The power suspend versioning number.
+ * @retval power_suspend_running_count
+ *    The number of running power_suspend watched vCPUs.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_schedule_metrics_status_suspended_return
+_okl4_sys_schedule_metrics_status_suspended(okl4_kcap_t scheduler_id)
+{
+    struct _okl4_sys_schedule_metrics_status_suspended_return result;
+
+    register uint32_t r0 asm("r0") = (uint32_t)scheduler_id;
+    register uint32_t r1 asm("r1");
+    register uint32_t r2 asm("r2");
+    __asm__ __volatile__(
+            ""hvc(5206)"\n\t"
+            : "=r"(r1), "=r"(r2), "+r"(r0)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    result.error = (okl4_error_t)(r0);
+    result.power_suspend_version = (uint32_t)(r1);
+    result.power_suspend_running_count = (uint32_t)(r2);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_schedule_metrics_status_suspended_return
+_okl4_sys_schedule_metrics_status_suspended(okl4_kcap_t scheduler_id)
+{
+    struct _okl4_sys_schedule_metrics_status_suspended_return result;
+
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)scheduler_id;
+    register okl4_register_t x1 asm("x1");
+    register okl4_register_t x2 asm("x2");
+    __asm__ __volatile__(
+            "" hvc(5206) "\n\t"
+            : "=r"(x1), "=r"(x2), "+r"(x0)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.error = (okl4_error_t)(x0);
+    result.power_suspend_version = (uint32_t)(x1);
+    result.power_suspend_running_count = (uint32_t)(x2);
+    return result;
+}
+
+#endif
+
+/**
+ *
+ * Register a vCPU for suspend count tracking.
+ *
+ * @param scheduler_id
+ *    The scheduler capability identifier.
+ * @param vcpu_id
+ *    The target vCPU capability identifier.
+ * @param watch
+ *    Whether to register or unregister.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_schedule_metrics_watch_suspended(okl4_kcap_t scheduler_id,
+        okl4_kcap_t vcpu_id, okl4_bool_t watch)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)scheduler_id;
+    register uint32_t r1 asm("r1") = (uint32_t)vcpu_id;
+    register uint32_t r2 asm("r2") = (uint32_t)watch;
+    __asm__ __volatile__(
+            ""hvc(5207)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_schedule_metrics_watch_suspended(okl4_kcap_t scheduler_id,
+        okl4_kcap_t vcpu_id, okl4_bool_t watch)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)scheduler_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)vcpu_id;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)watch;
+    __asm__ __volatile__(
+            "" hvc(5207) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
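+/*
+ * Illustrative usage sketch only (guarded out of compilation): register a
+ * vCPU for suspend tracking and then poll the suspend status. Capability
+ * names are hypothetical; OKL4_ERROR_OK is assumed to be the SDK's
+ * success value and okl4_bool_t true is assumed to be 1.
+ */
+#if 0
+static okl4_error_t
+example_watch_and_poll_suspend(okl4_kcap_t scheduler_kcap,
+        okl4_kcap_t vcpu_kcap, uint32_t *running_count)
+{
+    struct _okl4_sys_schedule_metrics_status_suspended_return status;
+    okl4_error_t err;
+
+    /* Register the vCPU so it contributes to the running count. */
+    err = _okl4_sys_schedule_metrics_watch_suspended(scheduler_kcap,
+            vcpu_kcap, (okl4_bool_t)1);
+    if (err != OKL4_ERROR_OK) {
+        return err;
+    }
+
+    /* Read the suspend version and the number of running watched vCPUs. */
+    status = _okl4_sys_schedule_metrics_status_suspended(scheduler_kcap);
+    if (status.error == OKL4_ERROR_OK) {
+        *running_count = status.power_suspend_running_count;
+    }
+    return status.error;
+}
+#endif
+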
+/**
+ *
+ * @brief Disable profiling of a physical CPU.
+ *
+ * @param phys_cpu
+ *    The physical CPU capability id.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_schedule_profile_cpu_disable(okl4_kcap_t phys_cpu)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)phys_cpu;
+    __asm__ __volatile__(
+            ""hvc(5168)"\n\t"
+            : "+r"(r0)
+            :
+            : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_schedule_profile_cpu_disable(okl4_kcap_t phys_cpu)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)phys_cpu;
+    __asm__ __volatile__(
+            "" hvc(5168) "\n\t"
+            : "+r"(x0)
+            :
+            : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Enable profiling of a physical CPU.
+ *
+ *    This operation enables profiling of physical CPU related properties
+ *    such as core usage and context switch count.
+ *
+ * @param phys_cpu
+ *    The physical CPU capability id.
+ *
+ * @retval error
+ *    Resulting error.
+ * @retval timestamp
+ *    The current timestamp.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_schedule_profile_cpu_enable_return
+_okl4_sys_schedule_profile_cpu_enable(okl4_kcap_t phys_cpu)
+{
+    typedef union {
+        struct uint64 {
+            uint32_t lo;
+            uint32_t hi;
+        } words;
+        uint64_t val;
+    } okl4_uint64_tmp;
+    okl4_uint64_tmp timestamp_tmp;
+    struct _okl4_sys_schedule_profile_cpu_enable_return result;
+
+    register uint32_t r0 asm("r0") = (uint32_t)phys_cpu;
+    register uint32_t r1 asm("r1");
+    register uint32_t r2 asm("r2");
+    __asm__ __volatile__(
+            ""hvc(5169)"\n\t"
+            : "=r"(r1), "=r"(r2), "+r"(r0)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    result.error = (okl4_error_t)(r0);
+    timestamp_tmp.words.lo = r1;
+    timestamp_tmp.words.hi = r2;
+    result.timestamp = (uint64_t)(timestamp_tmp.val);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_schedule_profile_cpu_enable_return
+_okl4_sys_schedule_profile_cpu_enable(okl4_kcap_t phys_cpu)
+{
+    struct _okl4_sys_schedule_profile_cpu_enable_return result;
+
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)phys_cpu;
+    register okl4_register_t x1 asm("x1");
+    __asm__ __volatile__(
+            "" hvc(5169) "\n\t"
+            : "=r"(x1), "+r"(x0)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.error = (okl4_error_t)(x0);
+    result.timestamp = (uint64_t)(x1);
+    return result;
+}
+
+#endif
+
+/**
+ *
+ * @brief Retrieve profiling data relating to a physical CPU core.
+ *
+ *    @details
+ *    This operation returns a set of profiling data relating to a
+ *    physical CPU. A timestamp of the current system time in units of
+ *    microseconds is recorded during the operation. The remaining data
+ *    fields indicate runtime and number of events since the last
+ *    invocation of this operation.
+ *
+ *    After the profiling data is retrieved, the kernel resets all metrics
+ *    to zero.
+ *
+ *    @par profile data
+ *    For a physical CPU, the returned data is:
+ *    - \p cpu_time: Idle time of the CPU in microseconds.
+ *    - \p context_switches: Number of context switches on this core.
+ *    - \p enabled: True if profiling is enabled on this CPU.
+ *
+ * @param phys_cpu
+ *    The physical CPU capability id.
+ * @param profile
+ *    `return by reference`. Profiling data.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_schedule_profile_cpu_get_data(okl4_kcap_t phys_cpu,
+        struct okl4_schedule_profile_data *profile)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)phys_cpu;
+    register uint32_t r1 asm("r1") = (uint32_t)(uintptr_t)profile;
+    __asm__ __volatile__(
+            ""hvc(5170)"\n\t"
+            : "+r"(r0), "+r"(r1)
+            :
+            : "cc", "memory", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_schedule_profile_cpu_get_data(okl4_kcap_t phys_cpu,
+        struct okl4_schedule_profile_data *profile)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)phys_cpu;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)(uintptr_t)profile;
+    __asm__ __volatile__(
+            "" hvc(5170) "\n\t"
+            : "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
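+/*
+ * Illustrative usage sketch only (guarded out of compilation): enable
+ * profiling on a physical CPU and later collect the accumulated data.
+ * After the call, the fields described above (cpu_time, context_switches,
+ * enabled) can be read from the profile structure, assuming its members
+ * use the documented names. OKL4_ERROR_OK is assumed to be the SDK's
+ * success value.
+ */
+#if 0
+static okl4_error_t
+example_profile_cpu(okl4_kcap_t phys_cpu_kcap,
+        struct okl4_schedule_profile_data *profile)
+{
+    struct _okl4_sys_schedule_profile_cpu_enable_return en;
+
+    en = _okl4_sys_schedule_profile_cpu_enable(phys_cpu_kcap);
+    if (en.error != OKL4_ERROR_OK) {
+        return en.error;
+    }
+
+    /* ... let the system run for a while ... */
+
+    /* The kernel resets all metrics to zero after this retrieval. */
+    return _okl4_sys_schedule_profile_cpu_get_data(phys_cpu_kcap, profile);
+}
+#endif
+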
+/**
+ *
+ * @brief Disable profiling of a vCPU.
+ *
+ * @param vcpu
+ *    The target vCPU id.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_schedule_profile_vcpu_disable(okl4_kcap_t vcpu)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)vcpu;
+    __asm__ __volatile__(
+            ""hvc(5171)"\n\t"
+            : "+r"(r0)
+            :
+            : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_schedule_profile_vcpu_disable(okl4_kcap_t vcpu)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)vcpu;
+    __asm__ __volatile__(
+            "" hvc(5171) "\n\t"
+            : "+r"(x0)
+            :
+            : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Enable profiling of a vCPU.
+ *
+ *    This operation enables profiling of vCPU related properties such as
+ *    execution time and context switch count.
+ *
+ * @param vcpu
+ *    The target vCPU id.
+ *
+ * @retval error
+ *    Resulting error.
+ * @retval timestamp
+ *    The current timestamp.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_schedule_profile_vcpu_enable_return
+_okl4_sys_schedule_profile_vcpu_enable(okl4_kcap_t vcpu)
+{
+    typedef union {
+        struct uint64 {
+            uint32_t lo;
+            uint32_t hi;
+        } words;
+        uint64_t val;
+    } okl4_uint64_tmp;
+    okl4_uint64_tmp timestamp_tmp;
+    struct _okl4_sys_schedule_profile_vcpu_enable_return result;
+
+    register uint32_t r0 asm("r0") = (uint32_t)vcpu;
+    register uint32_t r1 asm("r1");
+    register uint32_t r2 asm("r2");
+    __asm__ __volatile__(
+            ""hvc(5172)"\n\t"
+            : "=r"(r1), "=r"(r2), "+r"(r0)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    result.error = (okl4_error_t)(r0);
+    timestamp_tmp.words.lo = r1;
+    timestamp_tmp.words.hi = r2;
+    result.timestamp = (uint64_t)(timestamp_tmp.val);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_schedule_profile_vcpu_enable_return
+_okl4_sys_schedule_profile_vcpu_enable(okl4_kcap_t vcpu)
+{
+    struct _okl4_sys_schedule_profile_vcpu_enable_return result;
+
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)vcpu;
+    register okl4_register_t x1 asm("x1");
+    __asm__ __volatile__(
+            "" hvc(5172) "\n\t"
+            : "=r"(x1), "+r"(x0)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.error = (okl4_error_t)(x0);
+    result.timestamp = (uint64_t)(x1);
+    return result;
+}
+
+#endif
+
+/**
+ *
+ * @brief Retrieve profiling data relating to a vCPU.
+ *
+ *    @details
+ *    This operation returns a set of profiling data relating to a vCPU.
+ *    A timestamp of the current system time in units of microseconds is
+ *    recorded during the operation. The remaining data fields indicate
+ *    runtime and number of events since the last invocation of this
+ *    operation.
+ *
+ *    After the profiling data is retrieved, the kernel resets all metrics
+ *    to zero.
+ *
+ *    @par profile data
+ *    For a vCPU, the returned data is:
+ *    - \p cpu_time: Execution time of the vCPU in microseconds.
+ *    - \p context_switches: Number of context switches.
+ *    - \p cpu_migrations: Number of migrations between physical CPUs.
+ *    - \p enabled: True if profiling is enabled on this CPU.
+ *
+ * @param vcpu
+ *    The target vCPU id.
+ * @param profile
+ *    `return by reference`. Profiling data.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_schedule_profile_vcpu_get_data(okl4_kcap_t vcpu,
+        struct okl4_schedule_profile_data *profile)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)vcpu;
+    register uint32_t r1 asm("r1") = (uint32_t)(uintptr_t)profile;
+    __asm__ __volatile__(
+            ""hvc(5173)"\n\t"
+            : "+r"(r0), "+r"(r1)
+            :
+            : "cc", "memory", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_schedule_profile_vcpu_get_data(okl4_kcap_t vcpu,
+        struct okl4_schedule_profile_data *profile)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)vcpu;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)(uintptr_t)profile;
+    __asm__ __volatile__(
+            "" hvc(5173) "\n\t"
+            : "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: SCHEDULER_SUSPEND
+ *
+ * @param scheduler_id
+ * @param power_state
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_scheduler_suspend(okl4_kcap_t scheduler_id,
+        okl4_power_state_t power_state)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)scheduler_id;
+    register uint32_t r1 asm("r1") = (uint32_t)power_state;
+    __asm__ __volatile__(
+            ""hvc(5150)"\n\t"
+            : "+r"(r0), "+r"(r1)
+            :
+            : "cc", "memory", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_scheduler_suspend(okl4_kcap_t scheduler_id,
+        okl4_power_state_t power_state)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)scheduler_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)power_state;
+    __asm__ __volatile__(
+            "" hvc(5150) "\n\t"
+            : "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Cancel an active timeout on a specified timer.
+ *
+ *    @details
+ *    This operation cancels an active timeout on a specified timer. The
+ *    operation returns the time that was remaining on the cancelled
+ *    timeout. If there was not an active timeout, the operation returns
+ *    an error.
+ *
+ *    The returned remaining time is formatted in the requested units from
+ *    the \p flags argument.
+ *
+ *    The operation will also return the \p old_flags field indicating
+ *    whether the cancelled timeout was periodic or one-shot and whether
+ *    it was an absolute or relative timeout.
+ *
+ *    @par flags
+ *    - If the \p units flag is set, the remaining time is returned in
+ *    units of timer ticks. The length of a timer tick is KSP defined and
+ *    may be obtained with the @ref _okl4_sys_timer_get_resolution
+ *    operation.
+ *    - If the \p units flag is not set, the remaining time is returned in
+ *    nanoseconds.
+ *
+ *    @par old_flags
+ *    - If the \p periodic flag is set, the cancelled timeout was periodic.
+ *    - If the \p periodic flag is not set, the cancelled timeout was
+ *    one-shot.
+ *    - If the \p absolute flag is set, the cancelled timeout was an
+ *    absolute time.
+ *    - If the \p absolute flag is not set, the cancelled timeout was a
+ *    relative time.
+ *
+ * @param timer
+ *    The target timer capability.
+ * @param flags
+ *    Flags for the requested operation.
+ *
+ * @retval remaining
+ *    Time that was remaining on the cancelled timeout.
+ * @retval old_flags
+ *    Flags relating to the cancelled timeout.
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_timer_cancel_return
+_okl4_sys_timer_cancel(okl4_kcap_t timer, okl4_timer_flags_t flags)
+{
+    typedef union {
+        struct uint64 {
+            uint32_t lo;
+            uint32_t hi;
+        } words;
+        uint64_t val;
+    } okl4_uint64_tmp;
+    okl4_uint64_tmp remaining_tmp;
+    struct _okl4_sys_timer_cancel_return result;
+
+    register uint32_t r0 asm("r0") = (uint32_t)timer;
+    register uint32_t r1 asm("r1") = (uint32_t)flags;
+    register uint32_t r2 asm("r2");
+    register uint32_t r3 asm("r3");
+    __asm__ __volatile__(
+            ""hvc(5176)"\n\t"
+            : "=r"(r2), "=r"(r3), "+r"(r0), "+r"(r1)
+            :
+            : "cc", "memory", "r4", "r5"
+            );
+
+
+    remaining_tmp.words.lo = r0;
+    remaining_tmp.words.hi = r1;
+    result.remaining = (uint64_t)(remaining_tmp.val);
+    result.old_flags = (okl4_timer_flags_t)(r2);
+    result.error = (okl4_error_t)(r3);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_timer_cancel_return
+_okl4_sys_timer_cancel(okl4_kcap_t timer, okl4_timer_flags_t flags)
+{
+    struct _okl4_sys_timer_cancel_return result;
+
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)timer;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)flags;
+    register okl4_register_t x2 asm("x2");
+    __asm__ __volatile__(
+            "" hvc(5176) "\n\t"
+            : "=r"(x2), "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.remaining = (uint64_t)(x0);
+    result.old_flags = (okl4_timer_flags_t)(x1);
+    result.error = (okl4_error_t)(x2);
+    return result;
+}
+
+#endif
+
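+/*
+ * Illustrative usage sketch only (guarded out of compilation): cancel a
+ * pending timeout and report the time that was left, in nanoseconds. A
+ * zero flags value is assumed to leave the units flag clear, selecting
+ * nanoseconds as described above; OKL4_ERROR_OK is assumed to be the
+ * SDK's success value.
+ */
+#if 0
+static okl4_error_t
+example_cancel_timeout(okl4_kcap_t timer_kcap, uint64_t *remaining_ns)
+{
+    struct _okl4_sys_timer_cancel_return ret;
+
+    ret = _okl4_sys_timer_cancel(timer_kcap, (okl4_timer_flags_t)0);
+    if (ret.error == OKL4_ERROR_OK) {
+        *remaining_ns = ret.remaining;
+    }
+    return ret.error;
+}
+#endif
+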
+/**
+ *
+ * @brief Query the timer frequency and obtain time conversion constants.
+ *
+ *    @details
+ *    This operation returns the timer frequency and the conversion
+ *    constants that may be used to convert between units of nanoseconds
+ *    and units of ticks.
+ *
+ *    The timer frequency is returned as a 64-bit value in units of
+ *    micro-hertz (1000000 = 1Hz). The timer resolution (or period) can be
+ *    calculated from the frequency.
+ *
+ *    The time conversion constants are returned as values \p a and \p b
+ *    which can be used for unit conversions as follows:
+ *    - ns = (ticks) * \p a / \p b
+ *    - ticks = (ns * \p b) / \p a
+ *
+ *    @note
+ *    The constants are provided by the KSP module and are designed to be
+ *    used for simple overflow-free computation using 64-bit arithmetic
+ *    covering the time values from 0 to 2 years.
+ *
+ * @param timer
+ *    The target timer capability.
+ *
+ * @retval tick_freq
+ *    The timer frequency [in units of micro-hertz].
+ * @retval a
+ *    Ticks to nanoseconds conversion multiplier.
+ * @retval b
+ *    Ticks to nanoseconds conversion divisor.
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_timer_get_resolution_return
+_okl4_sys_timer_get_resolution(okl4_kcap_t timer)
+{
+    typedef union {
+        struct uint64 {
+            uint32_t lo;
+            uint32_t hi;
+        } words;
+        uint64_t val;
+    } okl4_uint64_tmp;
+    okl4_uint64_tmp tick_freq_tmp;
+    struct _okl4_sys_timer_get_resolution_return result;
+
+    register uint32_t r0 asm("r0") = (uint32_t)timer;
+    register uint32_t r1 asm("r1");
+    register uint32_t r2 asm("r2");
+    register uint32_t r3 asm("r3");
+    register uint32_t r4 asm("r4");
+    __asm__ __volatile__(
+            ""hvc(5177)"\n\t"
+            : "=r"(r1), "=r"(r2), "=r"(r3), "=r"(r4), "+r"(r0)
+            :
+            : "cc", "memory", "r5"
+            );
+
+
+    tick_freq_tmp.words.lo = r0;
+    tick_freq_tmp.words.hi = r1;
+    result.tick_freq = (uint64_t)(tick_freq_tmp.val);
+    result.a = (uint32_t)(r2);
+    result.b = (uint32_t)(r3);
+    result.error = (okl4_error_t)(r4);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_timer_get_resolution_return
+_okl4_sys_timer_get_resolution(okl4_kcap_t timer)
+{
+    struct _okl4_sys_timer_get_resolution_return result;
+
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)timer;
+    register okl4_register_t x1 asm("x1");
+    register okl4_register_t x2 asm("x2");
+    register okl4_register_t x3 asm("x3");
+    __asm__ __volatile__(
+            "" hvc(5177) "\n\t"
+            : "=r"(x1), "=r"(x2), "=r"(x3), "+r"(x0)
+            :
+            : "cc", "memory", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.tick_freq = (uint64_t)(x0);
+    result.a = (uint32_t)(x1);
+    result.b = (uint32_t)(x2);
+    result.error = (okl4_error_t)(x3);
+    return result;
+}
+
+#endif
+
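+/*
+ * Illustrative usage sketch only (guarded out of compilation): convert a
+ * tick count to nanoseconds using the constants described above
+ * (ns = ticks * a / b). OKL4_ERROR_OK is assumed to be the SDK's success
+ * value.
+ */
+#if 0
+static okl4_error_t
+example_ticks_to_ns(okl4_kcap_t timer_kcap, uint64_t ticks, uint64_t *ns)
+{
+    struct _okl4_sys_timer_get_resolution_return res;
+
+    res = _okl4_sys_timer_get_resolution(timer_kcap);
+    if (res.error != OKL4_ERROR_OK) {
+        return res.error;
+    }
+
+    /* Multiply before dividing; the KSP chooses a and b so that this does
+     * not overflow 64-bit arithmetic for times up to about two years. */
+    *ns = (ticks * res.a) / res.b;
+    return res.error;
+}
+#endif
+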
+/**
+ *
+ * @brief Query the current system time.
+ *
+ *    @details
+ *    This operation returns the current absolute system time. The \p flags
+ *    argument is used to specify the desired units for the return value.
+ *
+ *    - Absolute time is based on an arbitrary time zero, defined to be at
+ *    or before the time of boot.
+ *
+ *    @par flags
+ *    - If the \p units flag is set, the time is returned in units
+ *    of timer ticks. The length of a timer tick is KSP defined and may
+ *    be obtained with the @ref _okl4_sys_timer_get_resolution operation.
+ *    - If the \p units flag is not set, the time is returned in
+ *    terms of nanoseconds.
+ *
+ * @param timer
+ *    The target timer capability.
+ * @param flags
+ *    Flags for the requested operation.
+ *
+ * @retval time
+ *    The current system time.
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_timer_get_time_return
+_okl4_sys_timer_get_time(okl4_kcap_t timer, okl4_timer_flags_t flags)
+{
+    typedef union {
+        struct uint64 {
+            uint32_t lo;
+            uint32_t hi;
+        } words;
+        uint64_t val;
+    } okl4_uint64_tmp;
+    okl4_uint64_tmp time_tmp;
+    struct _okl4_sys_timer_get_time_return result;
+
+    register uint32_t r0 asm("r0") = (uint32_t)timer;
+    register uint32_t r1 asm("r1") = (uint32_t)flags;
+    register uint32_t r2 asm("r2");
+    __asm__ __volatile__(
+            ""hvc(5178)"\n\t"
+            : "=r"(r2), "+r"(r0), "+r"(r1)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    time_tmp.words.lo = r0;
+    time_tmp.words.hi = r1;
+    result.time = (uint64_t)(time_tmp.val);
+    result.error = (okl4_error_t)(r2);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_timer_get_time_return
+_okl4_sys_timer_get_time(okl4_kcap_t timer, okl4_timer_flags_t flags)
+{
+    struct _okl4_sys_timer_get_time_return result;
+
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)timer;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)flags;
+    __asm__ __volatile__(
+            "" hvc(5178) "\n\t"
+            : "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.time = (uint64_t)(x0);
+    result.error = (okl4_error_t)(x1);
+    return result;
+}
+
+#endif
+
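+/*
+ * Illustrative usage sketch only (guarded out of compilation): read the
+ * current absolute system time in nanoseconds. A zero flags value is
+ * assumed to leave the units flag clear, selecting nanoseconds as
+ * described above; OKL4_ERROR_OK is assumed to be the SDK's success
+ * value.
+ */
+#if 0
+static okl4_error_t
+example_get_time_ns(okl4_kcap_t timer_kcap, uint64_t *now_ns)
+{
+    struct _okl4_sys_timer_get_time_return t;
+
+    t = _okl4_sys_timer_get_time(timer_kcap, (okl4_timer_flags_t)0);
+    if (t.error == OKL4_ERROR_OK) {
+        *now_ns = t.time;
+    }
+    return t.error;
+}
+#endif
+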
+/**
+ *
+ * @brief Query a timer about an active timeout.
+ *
+ *    @details
+ *    The operation queries a timer about an active timeout. If there is no
+ *    active timeout, this operation returns an error.
+ *
+ *    If the timer has an active timeout, this operation returns the
+ *    remaining time and the flags associated with the timeout. The
+ *    remaining time is returned in the requested units from the \p flags
+ *    argument.
+ *
+ *    The operation also returns the \p active_flags field indicating
+ *    whether the active timeout is periodic or one-shot and whether it
+ *    was an absolute or relative timeout.
+ *
+ *    @par flags
+ *    - If the \p units flag is set, the remaining time is returned in
+ *    units of timer ticks. The length of a timer tick is KSP defined and
+ *    may be obtained with the @ref _okl4_sys_timer_get_resolution
+ *    operation.
+ *    - If the \p units flag is not set, the remaining time is returned in
+ *    units of nanoseconds.
+ *
+ *    @par active_flags
+ *    - If the \p periodic flag is set, the timeout is periodic.
+ *    - If the \p periodic flag is not set, the timeout is one-shot.
+ *    - If the \p absolute flag is set, the timeout is an absolute time.
+ *    - If the \p absolute flag is not set, the timeout is a relative time.
+ *
+ * @param timer
+ *    The target timer capability.
+ * @param flags
+ *    Flags for the requested operation.
+ *
+ * @retval remaining
+ *    Time remaining before the next timeout.
+ * @retval active_flags
+ *    Flags relating to the active timeout.
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_timer_query_return
+_okl4_sys_timer_query(okl4_kcap_t timer, okl4_timer_flags_t flags)
+{
+    typedef union {
+        struct uint64 {
+            uint32_t lo;
+            uint32_t hi;
+        } words;
+        uint64_t val;
+    } okl4_uint64_tmp;
+    okl4_uint64_tmp remaining_tmp;
+    struct _okl4_sys_timer_query_return result;
+
+    register uint32_t r0 asm("r0") = (uint32_t)timer;
+    register uint32_t r1 asm("r1") = (uint32_t)flags;
+    register uint32_t r2 asm("r2");
+    register uint32_t r3 asm("r3");
+    __asm__ __volatile__(
+            ""hvc(5179)"\n\t"
+            : "=r"(r2), "=r"(r3), "+r"(r0), "+r"(r1)
+            :
+            : "cc", "memory", "r4", "r5"
+            );
+
+
+    remaining_tmp.words.lo = r0;
+    remaining_tmp.words.hi = r1;
+    result.remaining = (uint64_t)(remaining_tmp.val);
+    result.active_flags = (okl4_timer_flags_t)(r2);
+    result.error = (okl4_error_t)(r3);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_timer_query_return
+_okl4_sys_timer_query(okl4_kcap_t timer, okl4_timer_flags_t flags)
+{
+    struct _okl4_sys_timer_query_return result;
+
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)timer;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)flags;
+    register okl4_register_t x2 asm("x2");
+    __asm__ __volatile__(
+            "" hvc(5179) "\n\t"
+            : "=r"(x2), "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.remaining = (uint64_t)(x0);
+    result.active_flags = (okl4_timer_flags_t)(x1);
+    result.error = (okl4_error_t)(x2);
+    return result;
+}
+
+#endif
+
+/**
+ *
+ * @brief Start a timer with a specified timeout.
+ *
+ *    @details
+ *    This operation optionally resets then starts a timer with a new
+ *    timeout. The specified timeout may be an `absolute` or `relative`
+ *    time, may be `one-shot` or `periodic`, and may be specified in units
+ *    of nanoseconds or ticks.
+ *
+ *    @par flags
+ *    - If the \p absolute flag is set, the timeout is treated as an
+ *    absolute time based on an arbitrary time zero, defined to be at or
+ *    before the time of boot.
+ *    - If the \p absolute flag is not set, the timeout is treated as a
+ *    relative time a specified amount of time into the future, e.g. 10ms
+ *    from now.
+ *    - If the \p periodic flag is set, the timeout is treated as a
+ *    periodic timeout that repeats with a period equal to the specified
+ *    timeout.
+ *    - If the \p periodic flag is not set, the timeout is treated as a
+ *    one-shot timeout that expires at the specified time and does not
+ *    repeat.
+ *    - If the \p units flag is set, the timeout is specified in units of
+ *    timer ticks. The length of a timer tick is KSP defined and may be
+ *    obtained with the @ref _okl4_sys_timer_get_resolution operation.
+ *    - If the \p units flag is not set, the timeout is specified in units
+ *    of nanoseconds.
+ *    - If the \p reload flag is set, any active timeout is cancelled and
+ *    the new timeout is programmed into the timer.
+ *
+ * @param timer
+ *    The target timer capability.
+ * @param timeout
+ *    The timeout value.
+ * @param flags
+ *    Flags for the requested operation.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_timer_start(okl4_kcap_t timer, uint64_t timeout,
+        okl4_timer_flags_t flags)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)timer;
+    register uint32_t r1 asm("r1") = (uint32_t)(timeout        & 0xffffffff);
+    register uint32_t r2 asm("r2") = (uint32_t)((timeout >> 32) & 0xffffffff);
+    register uint32_t r3 asm("r3") = (uint32_t)flags;
+    __asm__ __volatile__(
+            ""hvc(5180)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3)
+            :
+            : "cc", "memory", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_timer_start(okl4_kcap_t timer, uint64_t timeout,
+        okl4_timer_flags_t flags)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)timer;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)timeout;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)flags;
+    __asm__ __volatile__(
+            "" hvc(5180) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
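+/*
+ * Illustrative usage sketch only (guarded out of compilation): arm a
+ * relative, one-shot 10 ms timeout. A zero flags value is assumed to
+ * leave the absolute, periodic and units flags clear, which the
+ * description above defines as a relative, one-shot timeout in
+ * nanoseconds.
+ */
+#if 0
+static okl4_error_t
+example_start_10ms_timeout(okl4_kcap_t timer_kcap)
+{
+    uint64_t timeout_ns = 10ULL * 1000ULL * 1000ULL; /* 10 ms */
+
+    return _okl4_sys_timer_start(timer_kcap, timeout_ns,
+            (okl4_timer_flags_t)0);
+}
+#endif
+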
+/**
+ *
+ * OKL4 Microvisor system call: TRACEBUFFER_SYNC
+ *
+ *
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE void
+_okl4_sys_tracebuffer_sync(void)
+{
+    __asm__ __volatile__(
+            ""hvc(5199)"\n\t"
+            :
+            :
+            : "cc", "memory", "r0", "r1", "r2", "r3", "r4", "r5"
+            );
+
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE void
+_okl4_sys_tracebuffer_sync(void)
+{
+    __asm__ __volatile__(
+            "" hvc(5199) "\n\t"
+            :
+            :
+            : "cc", "memory", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+}
+
+#endif
+
+/**
+ *
+ * @brief Reset a vCPU.
+ *
+ *    @details
+ *    This operation resets a vCPU to its boot state.
+ *
+ * @param vcpu
+ *    The target vCPU capability.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_vcpu_reset(okl4_kcap_t vcpu)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)vcpu;
+    __asm__ __volatile__(
+            ""hvc(5122)"\n\t"
+            : "+r"(r0)
+            :
+            : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_vcpu_reset(okl4_kcap_t vcpu)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)vcpu;
+    __asm__ __volatile__(
+            "" hvc(5122) "\n\t"
+            : "+r"(x0)
+            :
+            : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Start a vCPU executing.
+ *
+ *    @details
+ *    This operation starts a stopped vCPU, at an optionally specified
+ *    instruction pointer. If the instruction pointer is not set, the
+ *    value saved at the previous stop is preserved.
+ *
+ * @param vcpu
+ *    The target vCPU capability.
+ * @param set_ip
+ *    Whether the instruction pointer should be set.
+ * @param ip
+ *    Instruction pointer to start the vCPU at.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_vcpu_start(okl4_kcap_t vcpu, okl4_bool_t set_ip, void *ip)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)vcpu;
+    register uint32_t r1 asm("r1") = (uint32_t)set_ip;
+    register uint32_t r2 asm("r2") = (uint32_t)(uintptr_t)ip;
+    __asm__ __volatile__(
+            ""hvc(5123)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_vcpu_start(okl4_kcap_t vcpu, okl4_bool_t set_ip, void *ip)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)vcpu;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)set_ip;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)(uintptr_t)ip;
+    __asm__ __volatile__(
+            "" hvc(5123) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
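+/**
+ * Example: a minimal usage sketch for vcpu_start, assuming a vCPU capability
+ * \c guest_vcpu_cap and an entry address \c guest_entry provided by the cell
+ * environment (both names are placeholders). Passing \p set_ip as false would
+ * instead resume at the instruction pointer saved when the vCPU last stopped.
+ *
+ * @code
+ * okl4_kcap_t guest_vcpu_cap;                  // assumed capability
+ * void *guest_entry = (void *)0x80000000UL;    // assumed entry address
+ *
+ * okl4_error_t err = _okl4_sys_vcpu_start(guest_vcpu_cap,
+ *         (okl4_bool_t)1,    // set_ip: start at guest_entry
+ *         guest_entry);
+ * @endcode
+ */
+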
+/**
+ *
+ * @brief Stop a vCPU executing.
+ *
+ *    @details
+ *    This operation stops a vCPU's execution until it is next restarted.
+ *
+ * @param vcpu
+ *    The target vCPU capability.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_vcpu_stop(okl4_kcap_t vcpu)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)vcpu;
+    __asm__ __volatile__(
+            ""hvc(5124)"\n\t"
+            : "+r"(r0)
+            :
+            : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_vcpu_stop(okl4_kcap_t vcpu)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)vcpu;
+    __asm__ __volatile__(
+            "" hvc(5124) "\n\t"
+            : "+r"(x0)
+            :
+            : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Switch a vCPU's execution mode between 32-bit and 64-bit.
+ *
+ *    @details
+ *    This operation resets a vCPU to its boot state, switches between
+ *        32-bit
+ *    and 64-bit modes, and restarts execution at the specified address.
+ *        The
+ *    start address must be valid in the vCPU's initial address space,
+ *        which may
+ *    not be the same as the caller's address space.
+ *
+ * @param vcpu
+ *    The target vCPU capability.
+ * @param to_64bit
+ *    The vCPU will reset in 64-bit mode if true; otherwise in 32-bit mode.
+ * @param set_ip
+ *    Whether the instruction pointer should be set.
+ * @param ip
+ *    Instruction pointer to start the vCPU at.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_vcpu_switch_mode(okl4_kcap_t vcpu, okl4_bool_t to_64bit,
+        okl4_bool_t set_ip, void *ip)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)vcpu;
+    register uint32_t r1 asm("r1") = (uint32_t)to_64bit;
+    register uint32_t r2 asm("r2") = (uint32_t)set_ip;
+    register uint32_t r3 asm("r3") = (uint32_t)(uintptr_t)ip;
+    __asm__ __volatile__(
+            ""hvc(5125)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3)
+            :
+            : "cc", "memory", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_vcpu_switch_mode(okl4_kcap_t vcpu, okl4_bool_t to_64bit,
+        okl4_bool_t set_ip, void *ip)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)vcpu;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)to_64bit;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)set_ip;
+    register okl4_register_t x3 asm("x3") = (okl4_register_t)(uintptr_t)ip;
+    __asm__ __volatile__(
+            "" hvc(5125) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3)
+            :
+            : "cc", "memory", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
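+/**
+ * Example: a minimal usage sketch for vcpu_switch_mode, assuming the caller
+ * wants to reset a vCPU into 32-bit mode before handing control to a 32-bit
+ * guest image. The capability and entry address below are placeholders; as
+ * noted above, the entry address must be valid in the vCPU's initial address
+ * space.
+ *
+ * @code
+ * okl4_kcap_t guest_vcpu_cap;                   // assumed capability
+ * void *aarch32_entry = (void *)0x40080000UL;   // assumed 32-bit entry point
+ *
+ * okl4_error_t err = _okl4_sys_vcpu_switch_mode(guest_vcpu_cap,
+ *         (okl4_bool_t)0,    // to_64bit: false, reset into 32-bit mode
+ *         (okl4_bool_t)1,    // set_ip: true
+ *         aarch32_entry);
+ * @endcode
+ */
+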
+/**
+ *
+ * @brief Signal a synchronization event.
+ *
+ *    @details
+ *    This operation sets the wakeup flags for all vCPUs in the caller's
+ *        domain.
+ *    If any vCPUs in the domain are waiting due to a pending `sync_wfe`
+ *        operation,
+ *    they will be released from the wait. The OKL4 scheduler will then
+ *        determine
+ *    which vCPUs should execute first based on their priority.
+ *
+ *    This `sync_sev` operation is non-blocking and is used to signal other
+ *        vCPUs
+ *    about some user-defined event. A typical use of this operation is to
+ *        signal
+ *    the release of a spinlock to other waiting vCPUs.
+ *
+ *    @see _okl4_sys_vcpu_sync_wfe
+ *
+ *
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE void
+_okl4_sys_vcpu_sync_sev(void)
+{
+    __asm__ __volatile__(
+            ""hvc(5126)"\n\t"
+            :
+            :
+            : "cc", "memory", "r0", "r1", "r2", "r3", "r4", "r5"
+            );
+
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE void
+_okl4_sys_vcpu_sync_sev(void)
+{
+    __asm__ __volatile__(
+            "" hvc(5126) "\n\t"
+            :
+            :
+            : "cc", "memory", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+}
+
+#endif
+
+/**
+ *
+ * @brief Wait for a synchronization event.
+ *
+ *    @details
+ *    This operation is used to defer the execution of a vCPU while it is
+ *        waiting
+ *    for an event. This operation is non-blocking, in that if no other
+ *        vCPUs in
+ *    the system are runnable, the operation will complete and the vCPU is
+ *        not
+ *    blocked. The `sync_wfe` operation uses the \p holder argument as a
+ *        hint to
+ *    the vCPU the caller is waiting on.
+ *
+ *    This operation first determines whether there is a pending wakeup
+ *        flag set
+ *    for the calling vCPU. If the flag is set, the operation clears the
+ *        flag and
+ *    returns immediately. If the caller has provided a valid \p holder id,
+ *        and
+ *    the holder is currently executing on a different physical core, the
+ *    operation again returns immediately.
+ *
+ *    In all other cases, the Microvisor records that the vCPU is waiting
+ *        and
+ *    reduces the vCPU's priority temporarily to the lowest priority in
+ *    the system. The scheduler is then invoked to rebalance the system.
+ *
+ *    A waiting vCPU will continue execution and return from the `sync_wfe`
+ *    operation as soon as no higher priority vCPUs in the system are
+ *        available
+ *    for scheduling, or a wake-up event is signalled by another vCPU in
+ *        the same
+ *    domain.
+ *
+ *    @par holder
+ *    The holder identifier may be a valid capability to another vCPU, or
+ *        an
+ *    invalid id. If the provided id is valid, it is used as a hint to the
+ *    Microvisor that the caller is waiting on the specified vCPU. The
+ *    `vcpu_sync` API is optimized for short spinlock type use-cases and
+ *        will
+ *    therefore allow the caller to continue execution without waiting, if
+ *        the
+ *    target \p holder vCPU is presently running on another physical core.
+ *        This
+ *    is done to reduce latency with the expectation that the holder vCPU
+ *        will
+ *    soon release the lock.
+ *
+ *    @see _okl4_sys_vcpu_sync_sev
+ *
+ * @param holder
+ *    Capability of the vCPU to wait for, or an invalid designator.
+ *
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE void
+_okl4_sys_vcpu_sync_wfe(okl4_kcap_t holder)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)holder;
+    __asm__ __volatile__(
+            ""hvc(5127)"\n\t"
+            : "+r"(r0)
+            :
+            : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+            );
+
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE void
+_okl4_sys_vcpu_sync_wfe(okl4_kcap_t holder)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)holder;
+    __asm__ __volatile__(
+            "" hvc(5127) "\n\t"
+            : "+r"(x0)
+            :
+            : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+}
+
+#endif
+
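+/**
+ * Example: a sketch of the spinlock use-case described above, built on the
+ * vcpu_sync_wfe/vcpu_sync_sev hints. The lock structure, its holder field and
+ * the use of GCC/Clang __atomic_* builtins are illustrative assumptions, not
+ * part of this API.
+ *
+ * @code
+ * struct demo_spinlock {
+ *     okl4_kcap_t holder;   // capability of the vCPU believed to hold the lock
+ *     uint32_t locked;
+ * };
+ *
+ * static void
+ * demo_spin_lock(struct demo_spinlock *lock, okl4_kcap_t self)
+ * {
+ *     while (__atomic_exchange_n(&lock->locked, 1U, __ATOMIC_ACQUIRE) != 0U) {
+ *         // Hint to the Microvisor that we are waiting on the holder vCPU.
+ *         _okl4_sys_vcpu_sync_wfe(lock->holder);
+ *     }
+ *     lock->holder = self;   // simplified, unfenced update for illustration
+ * }
+ *
+ * static void
+ * demo_spin_unlock(struct demo_spinlock *lock)
+ * {
+ *     __atomic_store_n(&lock->locked, 0U, __ATOMIC_RELEASE);
+ *     // Wake any vCPUs in this domain waiting in sync_wfe.
+ *     _okl4_sys_vcpu_sync_sev();
+ * }
+ * @endcode
+ */
+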
+/**
+ *
+ * @brief Atomically fetch an interrupt payload and raise a virtual interrupt.
+ *
+ *    @details
+ *    This API is equivalent to atomically calling @ref
+ *        sys_interrupt_get_payload
+ *    and @ref sys_vinterrupt_modify. Typically, the specified virtual
+ *        interrupt
+ *    will be one that is not attached to the specified virtual interrupt
+ *        source,
+ *    but this is not enforced. If only one virtual interrupt source is
+ *        affected,
+ *    then the @ref sys_interrupt_get_payload phase will occur first.
+ *
+ *    Certain communication protocols must perform this sequence of
+ *        operations
+ *    atomically in order to maintain consistency. Other than being atomic,
+ *        this
+ *    is no different to invoking the two component operations separately.
+ *
+ * @param irq
+ *    An interrupt line number for the virtual GIC.
+ * @param virqline
+ *    A virtual interrupt line capability.
+ * @param mask
+ *    A machine-word-sized array of payload flags to preserve.
+ * @param payload
+ *    A machine-word-sized array of payload flags to set.
+ *
+ * @retval error
+ *    The resulting error value.
+ * @retval payload
+ *    Accumulated virtual interrupt payload flags.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_vinterrupt_clear_and_raise_return
+_okl4_sys_vinterrupt_clear_and_raise(okl4_interrupt_number_t irq,
+        okl4_kcap_t virqline, okl4_virq_flags_t mask, okl4_virq_flags_t payload)
+{
+    typedef union {
+        struct uint64 {
+            uint32_t lo;
+            uint32_t hi;
+        } words;
+        uint64_t val;
+    } okl4_uint64_tmp;
+    okl4_uint64_tmp payload_tmp;
+    struct _okl4_sys_vinterrupt_clear_and_raise_return result;
+
+    register uint32_t r0 asm("r0") = (uint32_t)irq;
+    register uint32_t r1 asm("r1") = (uint32_t)virqline;
+    register uint32_t r2 asm("r2") = (uint32_t)(mask        & 0xffffffff);
+    register uint32_t r3 asm("r3") = (uint32_t)((mask >> 32) & 0xffffffff);
+    register uint32_t r4 asm("r4") = (uint32_t)(payload        & 0xffffffff);
+    register uint32_t r5 asm("r5") = (uint32_t)((payload >> 32) & 0xffffffff);
+    __asm__ __volatile__(
+            ""hvc(5194)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4), "+r"(r5)
+            :
+            : "cc", "memory"
+            );
+
+
+    result.error = (okl4_error_t)(r0);
+    payload_tmp.words.lo = r1;
+    payload_tmp.words.hi = r2;
+    result.payload = (okl4_virq_flags_t)(payload_tmp.val);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_vinterrupt_clear_and_raise_return
+_okl4_sys_vinterrupt_clear_and_raise(okl4_interrupt_number_t irq,
+        okl4_kcap_t virqline, okl4_virq_flags_t mask, okl4_virq_flags_t payload)
+{
+    struct _okl4_sys_vinterrupt_clear_and_raise_return result;
+
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)irq;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)virqline;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)mask;
+    register okl4_register_t x3 asm("x3") = (okl4_register_t)payload;
+    __asm__ __volatile__(
+            "" hvc(5194) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3)
+            :
+            : "cc", "memory", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.error = (okl4_error_t)(x0);
+    result.payload = (okl4_virq_flags_t)(x1);
+    return result;
+}
+
+#endif
+
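+/**
+ * Example: a minimal usage sketch for vinterrupt_clear_and_raise, fetching the
+ * payload of an incoming virtual interrupt while simultaneously raising a flag
+ * on an outgoing virtual interrupt line. The interrupt number and line
+ * capability are placeholders; a mask of 0 discards any flags previously
+ * pending on the outgoing line, per the modify semantics described below.
+ *
+ * @code
+ * okl4_interrupt_number_t incoming_irq = 32U;   // assumed vGIC line number
+ * okl4_kcap_t outgoing_virqline;                // assumed capability
+ *
+ * struct _okl4_sys_vinterrupt_clear_and_raise_return ret;
+ * ret = _okl4_sys_vinterrupt_clear_and_raise(incoming_irq, outgoing_virqline,
+ *         (okl4_virq_flags_t)0,    // mask: keep no previous outgoing flags
+ *         (okl4_virq_flags_t)1);   // payload: set flag 0 on the outgoing line
+ * // ret.payload now holds the flags accumulated on the incoming interrupt.
+ * @endcode
+ */
+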
+/**
+ *
+ * @brief Raise a virtual interrupt, and modify the payload flags.
+ *
+ *    @details
+ *    This triggers a virtual interrupt by raising a virtual interrupt
+ *        source. A
+ *    virtual interrupt source object is distinct from a virtual interrupt.
+ *        A
+ *    virtual interrupt source is always linked to a virtual interrupt, but
+ *        the
+ *    reverse is not true.
+ *
+ *    Each Microvisor virtual interrupt carries a payload of flags which
+ *        may be
+ *    fetched by the recipient of the interrupt. An interrupt payload is a
+ *        @ref
+ *    okl4_word_t sized array of flags, packed into a single word. Flags
+ *        are
+ *    cleared whenever the interrupt recipient fetches the payload with the
+ *        @ref
+ *    okl4_sys_interrupt_get_payload API.
+ *
+ *    The interrupt-modify API allows the caller to pass in a new set of
+ *        flags in
+ *    the \p payload field, and a set of flags to keep from the previous
+ *        payload
+ *    in the \p mask field. If the interrupt has previously been raised and
+ *        not
+ *    yet delivered, the flags accumulate with a mask; that is, each flag
+ *        is the
+ *    boolean OR of the specified value with the boolean AND of its
+ *        previous
+ *    value and the mask.
+ *
+ *    When the recipient has configured the interrupt for edge triggering,
+ *        an
+ *    invocation of this API is counted as a single edge; this triggers
+ *        interrupt
+ *    delivery if the interrupt is not already pending, irrespective of the
+ *    payload. If the interrupt is configured for level triggering, then
+ *        its
+ *    pending state is the boolean OR of its payload flags after any
+ *        specified
+ *    flags are cleared or raised; at least one flag must be set in the new
+ *    payload to permit delivery of a level-triggered interrupt.
+ *
+ * @param virqline
+ *    A virtual interrupt line capability.
+ * @param mask
+ *    A machine-word-sized array of payload flags to preserve.
+ * @param payload
+ *    A machine-word-sized array of payload flags to set.
+ *
+ * @retval error
+ *    The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_vinterrupt_modify(okl4_kcap_t virqline, okl4_virq_flags_t mask,
+        okl4_virq_flags_t payload)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)virqline;
+    register uint32_t r1 asm("r1") = (uint32_t)(mask        & 0xffffffff);
+    register uint32_t r2 asm("r2") = (uint32_t)((mask >> 32) & 0xffffffff);
+    register uint32_t r3 asm("r3") = (uint32_t)(payload        & 0xffffffff);
+    register uint32_t r4 asm("r4") = (uint32_t)((payload >> 32) & 0xffffffff);
+    __asm__ __volatile__(
+            ""hvc(5195)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4)
+            :
+            : "cc", "memory", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_vinterrupt_modify(okl4_kcap_t virqline, okl4_virq_flags_t mask,
+        okl4_virq_flags_t payload)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)virqline;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)mask;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)payload;
+    __asm__ __volatile__(
+            "" hvc(5195) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
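+/**
+ * Example: a minimal usage sketch for vinterrupt_modify. With the semantics
+ * described above, each resulting flag is (payload | (previous & mask)); the
+ * call below keeps only flag 0 from any previous payload and additionally sets
+ * flag 1. The line capability is a placeholder.
+ *
+ * @code
+ * okl4_kcap_t virqline_cap;   // assumed capability
+ *
+ * okl4_error_t err = _okl4_sys_vinterrupt_modify(virqline_cap,
+ *         (okl4_virq_flags_t)0x1,    // mask: preserve flag 0 only
+ *         (okl4_virq_flags_t)0x2);   // payload: set flag 1
+ * @endcode
+ */
+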
+/**
+ *
+ * @brief Raise a virtual interrupt, setting specified payload flags.
+ *
+ *    @details
+ *    This triggers a virtual interrupt by raising a virtual interrupt
+ *        source. A
+ *    virtual interrupt source object is distinct from a virtual interrupt.
+ *        A
+ *    virtual interrupt source is always linked to a virtual interrupt, but
+ *        the
+ *    reverse is not true.
+ *
+ *    Each Microvisor virtual interrupt carries a payload of flags which
+ *        may be
+ *    fetched by the recipient of the interrupt. An interrupt payload is a
+ *        @ref
+ *    okl4_word_t sized array of flags, packed into a single word. Flags
+ *        are
+ *    cleared whenever the interrupt recipient fetches the payload with the
+ *        @ref
+ *    okl4_sys_interrupt_get_payload API.
+ *
+ *    The interrupt-raise API allows the caller to pass in a new set of
+ *        flags in
+ *    the \p payload field. If the interrupt has previously been raised and
+ *        not
+ *    yet delivered, the flags accumulate; that is, each flag is the
+ *        boolean OR
+ *    of its previous value and the specified value.
+ *
+ *    When the recipient has configured the interrupt for edge triggering,
+ *        an
+ *    invocation of this API is counted as a single edge; this triggers
+ *        interrupt
+ *    delivery if the interrupt is not already pending, irrespective of the
+ *    payload. If the interrupt is configured for level triggering, then
+ *        its
+ *    pending state is the boolean OR of its payload flags after any
+ *        specified
+ *    flags are raised; at least one flag must be set in the new payload to
+ *    permit delivery of a level-triggered interrupt.
+ *
+ *    @note Invoking this API is equivalent to invoking the @ref
+ *    okl4_sys_vinterrupt_modify API with all bits set in the \p mask
+ *        value.
+ *
+ *    @note This API is distinct from the @ref okl4_sys_interrupt_raise
+ *        API,
+ *    which raises a local software-generated interrupt without requiring
+ *        an
+ *    explicit capability.
+ *
+ * @param virqline
+ *    A virtual interrupt line capability.
+ * @param payload
+ *    A machine-word-sized array of payload flags to set.
+ *
+ * @retval error
+ *    The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_vinterrupt_raise(okl4_kcap_t virqline, okl4_virq_flags_t payload)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)virqline;
+    register uint32_t r1 asm("r1") = (uint32_t)(payload        & 0xffffffff);
+    register uint32_t r2 asm("r2") = (uint32_t)((payload >> 32) & 0xffffffff);
+    __asm__ __volatile__(
+            ""hvc(5196)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_vinterrupt_raise(okl4_kcap_t virqline, okl4_virq_flags_t payload)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)virqline;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)payload;
+    __asm__ __volatile__(
+            "" hvc(5196) "\n\t"
+            : "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
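+/**
+ * Example: a minimal usage sketch for vinterrupt_raise, setting a single
+ * payload flag. As noted above, this is equivalent to vinterrupt_modify with an
+ * all-ones mask, so any previously pending flags are preserved and accumulate.
+ * The line capability is a placeholder.
+ *
+ * @code
+ * okl4_kcap_t virqline_cap;   // assumed capability
+ *
+ * okl4_error_t err = _okl4_sys_vinterrupt_raise(virqline_cap,
+ *         (okl4_virq_flags_t)(1U << 3));   // set payload flag 3
+ * @endcode
+ */
+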
+
+/*lint -restore */
+
+#endif /* !ASSEMBLY */
+
+/*
+ * Assembly system call prototypes / numbers.
+ */
+
+/** @addtogroup lib_microvisor_syscall_numbers Microvisor System Call Numbers
+ * @{
+ */
+#define OKL4_SYSCALL_AXON_PROCESS_RECV 5184
+
+#define OKL4_SYSCALL_AXON_SET_HALTED 5186
+
+#define OKL4_SYSCALL_AXON_SET_RECV_AREA 5187
+
+#define OKL4_SYSCALL_AXON_SET_RECV_QUEUE 5188
+
+#define OKL4_SYSCALL_AXON_SET_RECV_SEGMENT 5189
+
+#define OKL4_SYSCALL_AXON_SET_SEND_AREA 5190
+
+#define OKL4_SYSCALL_AXON_SET_SEND_QUEUE 5191
+
+#define OKL4_SYSCALL_AXON_SET_SEND_SEGMENT 5192
+
+#define OKL4_SYSCALL_AXON_TRIGGER_SEND 5185
+
+#define OKL4_SYSCALL_INTERRUPT_ACK 5128
+
+#define OKL4_SYSCALL_INTERRUPT_ATTACH_PRIVATE 5134
+
+#define OKL4_SYSCALL_INTERRUPT_ATTACH_SHARED 5135
+
+#define OKL4_SYSCALL_INTERRUPT_DETACH 5136
+
+#define OKL4_SYSCALL_INTERRUPT_DIST_ENABLE 5133
+
+#define OKL4_SYSCALL_INTERRUPT_EOI 5129
+
+#define OKL4_SYSCALL_INTERRUPT_GET_HIGHEST_PRIORITY_PENDING 5137
+
+#define OKL4_SYSCALL_INTERRUPT_GET_PAYLOAD 5132
+
+#define OKL4_SYSCALL_INTERRUPT_LIMITS 5138
+
+#define OKL4_SYSCALL_INTERRUPT_MASK 5130
+
+#define OKL4_SYSCALL_INTERRUPT_RAISE 5145
+
+#define OKL4_SYSCALL_INTERRUPT_SET_BINARY_POINT 5139
+
+#define OKL4_SYSCALL_INTERRUPT_SET_CONFIG 5140
+
+#define OKL4_SYSCALL_INTERRUPT_SET_CONTROL 5141
+
+#define OKL4_SYSCALL_INTERRUPT_SET_PRIORITY 5142
+
+#define OKL4_SYSCALL_INTERRUPT_SET_PRIORITY_MASK 5143
+
+#define OKL4_SYSCALL_INTERRUPT_SET_TARGETS 5144
+
+#define OKL4_SYSCALL_INTERRUPT_UNMASK 5131
+
+#define OKL4_SYSCALL_KDB_INTERACT 5120
+
+#define OKL4_SYSCALL_KDB_SET_OBJECT_NAME 5121
+
+#define OKL4_SYSCALL_KSP_PROCEDURE_CALL 5197
+
+#define OKL4_SYSCALL_MMU_ATTACH_SEGMENT 5152
+
+#define OKL4_SYSCALL_MMU_DETACH_SEGMENT 5153
+
+#define OKL4_SYSCALL_MMU_FLUSH_RANGE 5154
+
+#define OKL4_SYSCALL_MMU_FLUSH_RANGE_PN 5155
+
+#define OKL4_SYSCALL_MMU_LOOKUP_PAGE 5156
+
+#define OKL4_SYSCALL_MMU_LOOKUP_PN 5157
+
+#define OKL4_SYSCALL_MMU_MAP_PAGE 5158
+
+#define OKL4_SYSCALL_MMU_MAP_PN 5159
+
+#define OKL4_SYSCALL_MMU_UNMAP_PAGE 5160
+
+#define OKL4_SYSCALL_MMU_UNMAP_PN 5161
+
+#define OKL4_SYSCALL_MMU_UPDATE_PAGE_ATTRS 5162
+
+#define OKL4_SYSCALL_MMU_UPDATE_PAGE_PERMS 5163
+
+#define OKL4_SYSCALL_MMU_UPDATE_PN_ATTRS 5164
+
+#define OKL4_SYSCALL_MMU_UPDATE_PN_PERMS 5165
+
+#define OKL4_SYSCALL_PERFORMANCE_NULL_SYSCALL 5198
+
+#define OKL4_SYSCALL_PIPE_CONTROL 5146
+
+#define OKL4_SYSCALL_PIPE_RECV 5147
+
+#define OKL4_SYSCALL_PIPE_SEND 5148
+
+#define OKL4_SYSCALL_PRIORITY_WAIVE 5151
+
+#define OKL4_SYSCALL_REMOTE_GET_REGISTER 5200
+
+#define OKL4_SYSCALL_REMOTE_GET_REGISTERS 5201
+
+#define OKL4_SYSCALL_REMOTE_READ_MEMORY32 5202
+
+#define OKL4_SYSCALL_REMOTE_SET_REGISTER 5203
+
+#define OKL4_SYSCALL_REMOTE_SET_REGISTERS 5204
+
+#define OKL4_SYSCALL_REMOTE_WRITE_MEMORY32 5205
+
+#define OKL4_SYSCALL_SCHEDULE_METRICS_STATUS_SUSPENDED 5206
+
+#define OKL4_SYSCALL_SCHEDULE_METRICS_WATCH_SUSPENDED 5207
+
+#define OKL4_SYSCALL_SCHEDULE_PROFILE_CPU_DISABLE 5168
+
+#define OKL4_SYSCALL_SCHEDULE_PROFILE_CPU_ENABLE 5169
+
+#define OKL4_SYSCALL_SCHEDULE_PROFILE_CPU_GET_DATA 5170
+
+#define OKL4_SYSCALL_SCHEDULE_PROFILE_VCPU_DISABLE 5171
+
+#define OKL4_SYSCALL_SCHEDULE_PROFILE_VCPU_ENABLE 5172
+
+#define OKL4_SYSCALL_SCHEDULE_PROFILE_VCPU_GET_DATA 5173
+
+#define OKL4_SYSCALL_SCHEDULER_SUSPEND 5150
+
+#define OKL4_SYSCALL_TIMER_CANCEL 5176
+
+#define OKL4_SYSCALL_TIMER_GET_RESOLUTION 5177
+
+#define OKL4_SYSCALL_TIMER_GET_TIME 5178
+
+#define OKL4_SYSCALL_TIMER_QUERY 5179
+
+#define OKL4_SYSCALL_TIMER_START 5180
+
+#define OKL4_SYSCALL_TRACEBUFFER_SYNC 5199
+
+#define OKL4_SYSCALL_VCPU_RESET 5122
+
+#define OKL4_SYSCALL_VCPU_START 5123
+
+#define OKL4_SYSCALL_VCPU_STOP 5124
+
+#define OKL4_SYSCALL_VCPU_SWITCH_MODE 5125
+
+#define OKL4_SYSCALL_VCPU_SYNC_SEV 5126
+
+#define OKL4_SYSCALL_VCPU_SYNC_WFE 5127
+
+#define OKL4_SYSCALL_VINTERRUPT_CLEAR_AND_RAISE 5194
+
+#define OKL4_SYSCALL_VINTERRUPT_MODIFY 5195
+
+#define OKL4_SYSCALL_VINTERRUPT_RAISE 5196
+
+/** @} */
+#undef hvc
+
+#if defined(_definitions_for_linters)
+/* Ignore lint identifier clashes for syscall names. */
+/*lint -esym(621, _okl4_sys_axon_process_recv) */
+/*lint -esym(621, _okl4_sys_axon_set_halted) */
+/*lint -esym(621, _okl4_sys_axon_set_recv_area) */
+/*lint -esym(621, _okl4_sys_axon_set_recv_queue) */
+/*lint -esym(621, _okl4_sys_axon_set_recv_segment) */
+/*lint -esym(621, _okl4_sys_axon_set_send_area) */
+/*lint -esym(621, _okl4_sys_axon_set_send_queue) */
+/*lint -esym(621, _okl4_sys_axon_set_send_segment) */
+/*lint -esym(621, _okl4_sys_axon_trigger_send) */
+/*lint -esym(621, _okl4_sys_interrupt_ack) */
+/*lint -esym(621, _okl4_sys_interrupt_attach_private) */
+/*lint -esym(621, _okl4_sys_interrupt_attach_shared) */
+/*lint -esym(621, _okl4_sys_interrupt_detach) */
+/*lint -esym(621, _okl4_sys_interrupt_dist_enable) */
+/*lint -esym(621, _okl4_sys_interrupt_eoi) */
+/*lint -esym(621, _okl4_sys_interrupt_get_highest_priority_pending) */
+/*lint -esym(621, _okl4_sys_interrupt_get_payload) */
+/*lint -esym(621, _okl4_sys_interrupt_limits) */
+/*lint -esym(621, _okl4_sys_interrupt_mask) */
+/*lint -esym(621, _okl4_sys_interrupt_raise) */
+/*lint -esym(621, _okl4_sys_interrupt_set_binary_point) */
+/*lint -esym(621, _okl4_sys_interrupt_set_config) */
+/*lint -esym(621, _okl4_sys_interrupt_set_control) */
+/*lint -esym(621, _okl4_sys_interrupt_set_priority) */
+/*lint -esym(621, _okl4_sys_interrupt_set_priority_mask) */
+/*lint -esym(621, _okl4_sys_interrupt_set_targets) */
+/*lint -esym(621, _okl4_sys_interrupt_unmask) */
+/*lint -esym(621, _okl4_sys_kdb_interact) */
+/*lint -esym(621, _okl4_sys_kdb_set_object_name) */
+/*lint -esym(621, _okl4_sys_ksp_procedure_call) */
+/*lint -esym(621, _okl4_sys_mmu_attach_segment) */
+/*lint -esym(621, _okl4_sys_mmu_detach_segment) */
+/*lint -esym(621, _okl4_sys_mmu_flush_range) */
+/*lint -esym(621, _okl4_sys_mmu_flush_range_pn) */
+/*lint -esym(621, _okl4_sys_mmu_lookup_page) */
+/*lint -esym(621, _okl4_sys_mmu_lookup_pn) */
+/*lint -esym(621, _okl4_sys_mmu_map_page) */
+/*lint -esym(621, _okl4_sys_mmu_map_pn) */
+/*lint -esym(621, _okl4_sys_mmu_unmap_page) */
+/*lint -esym(621, _okl4_sys_mmu_unmap_pn) */
+/*lint -esym(621, _okl4_sys_mmu_update_page_attrs) */
+/*lint -esym(621, _okl4_sys_mmu_update_page_perms) */
+/*lint -esym(621, _okl4_sys_mmu_update_pn_attrs) */
+/*lint -esym(621, _okl4_sys_mmu_update_pn_perms) */
+/*lint -esym(621, _okl4_sys_performance_null_syscall) */
+/*lint -esym(621, _okl4_sys_pipe_control) */
+/*lint -esym(621, _okl4_sys_pipe_recv) */
+/*lint -esym(621, _okl4_sys_pipe_send) */
+/*lint -esym(621, _okl4_sys_priority_waive) */
+/*lint -esym(621, _okl4_sys_remote_get_register) */
+/*lint -esym(621, _okl4_sys_remote_get_registers) */
+/*lint -esym(621, _okl4_sys_remote_read_memory32) */
+/*lint -esym(621, _okl4_sys_remote_set_register) */
+/*lint -esym(621, _okl4_sys_remote_set_registers) */
+/*lint -esym(621, _okl4_sys_remote_write_memory32) */
+/*lint -esym(621, _okl4_sys_schedule_metrics_status_suspended) */
+/*lint -esym(621, _okl4_sys_schedule_metrics_watch_suspended) */
+/*lint -esym(621, _okl4_sys_schedule_profile_cpu_disable) */
+/*lint -esym(621, _okl4_sys_schedule_profile_cpu_enable) */
+/*lint -esym(621, _okl4_sys_schedule_profile_cpu_get_data) */
+/*lint -esym(621, _okl4_sys_schedule_profile_vcpu_disable) */
+/*lint -esym(621, _okl4_sys_schedule_profile_vcpu_enable) */
+/*lint -esym(621, _okl4_sys_schedule_profile_vcpu_get_data) */
+/*lint -esym(621, _okl4_sys_scheduler_suspend) */
+/*lint -esym(621, _okl4_sys_timer_cancel) */
+/*lint -esym(621, _okl4_sys_timer_get_resolution) */
+/*lint -esym(621, _okl4_sys_timer_get_time) */
+/*lint -esym(621, _okl4_sys_timer_query) */
+/*lint -esym(621, _okl4_sys_timer_start) */
+/*lint -esym(621, _okl4_sys_tracebuffer_sync) */
+/*lint -esym(621, _okl4_sys_vcpu_reset) */
+/*lint -esym(621, _okl4_sys_vcpu_start) */
+/*lint -esym(621, _okl4_sys_vcpu_stop) */
+/*lint -esym(621, _okl4_sys_vcpu_switch_mode) */
+/*lint -esym(621, _okl4_sys_vcpu_sync_sev) */
+/*lint -esym(621, _okl4_sys_vcpu_sync_wfe) */
+/*lint -esym(621, _okl4_sys_vinterrupt_clear_and_raise) */
+/*lint -esym(621, _okl4_sys_vinterrupt_modify) */
+/*lint -esym(621, _okl4_sys_vinterrupt_raise) */
+#endif
+#endif /* __AUTO__USER_SYSCALLS_H__ */
+/** @} */
diff --git a/include/microvisor/kernel/types.h b/include/microvisor/kernel/types.h
new file mode 100644
index 0000000..c87285c
--- /dev/null
+++ b/include/microvisor/kernel/types.h
@@ -0,0 +1,16064 @@
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* Auto generated - do not modify */
+
+/** @addtogroup lib_microvisor
+ * @{
+ */
+/** @addtogroup lib_microvisor_types Microvisor Types
+ * @{
+ */
+#ifndef __AUTO__MICROVISOR_TYPES_H__
+#define __AUTO__MICROVISOR_TYPES_H__
+
+#if !defined(ASSEMBLY)
+
+#define OKL4_DEFAULT_PERMS OKL4_PAGE_PERMS_RWX
+#define OKL4_DEFAULT_CACHE_ATTRIBUTES OKL4_PAGE_CACHE_DEFAULT
+
+#if __SIZEOF_POINTER__ != 8
+#define __ptr64(type, name) union { type name; uint64_t _x_##name; }
+#define __ptr64_array(type, name) union { type val; uint64_t _x; } name
+#else
+#define __ptr64(type, name) type name
+#define __ptr64_array(type, name) type name
+#endif
+
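+/**
+ * Example: a sketch of how the __ptr64 helper above keeps structure layouts
+ * identical between 32-bit and 64-bit builds by padding pointer members to 64
+ * bits on 32-bit targets (via an anonymous union). The descriptor below is a
+ * hypothetical illustration, not a type defined by this header.
+ *
+ * @code
+ * struct demo_shared_desc {
+ *     __ptr64(void *, base);   // pointer member, always 64 bits wide
+ *     uint64_t size;
+ * };
+ * @endcode
+ */
+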
+/**
+    The `okl4_bool_t` type represents a standard boolean value.  Valid values are
+    restricted to @ref OKL4_TRUE and @ref OKL4_FALSE.
+*/
+
+typedef _Bool okl4_bool_t;
+
+
+
+
+
+
+
+
+/**
+    - BITS 7..0 -   @ref OKL4_MASK_AFF0_ARM_MPIDR
+    - BITS 15..8 -   @ref OKL4_MASK_AFF1_ARM_MPIDR
+    - BITS 23..16 -   @ref OKL4_MASK_AFF2_ARM_MPIDR
+    - BIT 24 -   @ref OKL4_MASK_MT_ARM_MPIDR
+    - BIT 30 -   @ref OKL4_MASK_U_ARM_MPIDR
+    - BIT 31 -   @ref OKL4_MASK_MP_ARM_MPIDR
+    - BITS 39..32 -   @ref OKL4_MASK_AFF3_ARM_MPIDR
+*/
+
+/*lint -esym(621, okl4_arm_mpidr_t) */
+typedef uint64_t okl4_arm_mpidr_t;
+
+/*lint -esym(621, okl4_arm_mpidr_getaff0) */
+/*lint -esym(714, okl4_arm_mpidr_getaff0) */
+OKL4_FORCE_INLINE uint64_t
+okl4_arm_mpidr_getaff0(const okl4_arm_mpidr_t *x);
+
+/*lint -esym(621, okl4_arm_mpidr_setaff0) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_setaff0(okl4_arm_mpidr_t *x, uint64_t _aff0);
+
+/*lint -esym(621, okl4_arm_mpidr_getaff1) */
+/*lint -esym(714, okl4_arm_mpidr_getaff1) */
+OKL4_FORCE_INLINE uint64_t
+okl4_arm_mpidr_getaff1(const okl4_arm_mpidr_t *x);
+
+/*lint -esym(621, okl4_arm_mpidr_setaff1) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_setaff1(okl4_arm_mpidr_t *x, uint64_t _aff1);
+
+/*lint -esym(621, okl4_arm_mpidr_getaff2) */
+/*lint -esym(714, okl4_arm_mpidr_getaff2) */
+OKL4_FORCE_INLINE uint64_t
+okl4_arm_mpidr_getaff2(const okl4_arm_mpidr_t *x);
+
+/*lint -esym(621, okl4_arm_mpidr_setaff2) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_setaff2(okl4_arm_mpidr_t *x, uint64_t _aff2);
+
+/*lint -esym(621, okl4_arm_mpidr_getaff3) */
+/*lint -esym(714, okl4_arm_mpidr_getaff3) */
+OKL4_FORCE_INLINE uint64_t
+okl4_arm_mpidr_getaff3(const okl4_arm_mpidr_t *x);
+
+/*lint -esym(621, okl4_arm_mpidr_setaff3) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_setaff3(okl4_arm_mpidr_t *x, uint64_t _aff3);
+
+/*lint -esym(621, okl4_arm_mpidr_getmt) */
+/*lint -esym(714, okl4_arm_mpidr_getmt) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_mpidr_getmt(const okl4_arm_mpidr_t *x);
+
+/*lint -esym(621, okl4_arm_mpidr_setmt) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_setmt(okl4_arm_mpidr_t *x, okl4_bool_t _mt);
+
+/*lint -esym(621, okl4_arm_mpidr_getu) */
+/*lint -esym(714, okl4_arm_mpidr_getu) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_mpidr_getu(const okl4_arm_mpidr_t *x);
+
+/*lint -esym(621, okl4_arm_mpidr_setu) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_setu(okl4_arm_mpidr_t *x, okl4_bool_t _u);
+
+/*lint -esym(621, okl4_arm_mpidr_getmp) */
+/*lint -esym(714, okl4_arm_mpidr_getmp) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_mpidr_getmp(const okl4_arm_mpidr_t *x);
+
+/*lint -esym(714, okl4_arm_mpidr_init) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_init(okl4_arm_mpidr_t *x);
+
+/*lint -esym(714, okl4_arm_mpidr_cast) */
+OKL4_FORCE_INLINE okl4_arm_mpidr_t
+okl4_arm_mpidr_cast(uint64_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_ARM_MPIDR_AFF0_MASK) */
+#define OKL4_ARM_MPIDR_AFF0_MASK ((okl4_arm_mpidr_t)255U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_AFF0_ARM_MPIDR) */
+#define OKL4_MASK_AFF0_ARM_MPIDR ((okl4_arm_mpidr_t)255U)
+/*lint -esym(621, OKL4_SHIFT_AFF0_ARM_MPIDR) */
+#define OKL4_SHIFT_AFF0_ARM_MPIDR (0)
+/*lint -esym(621, OKL4_WIDTH_AFF0_ARM_MPIDR) */
+#define OKL4_WIDTH_AFF0_ARM_MPIDR (8)
+/*lint -esym(621, OKL4_ARM_MPIDR_AFF1_MASK) */
+#define OKL4_ARM_MPIDR_AFF1_MASK ((okl4_arm_mpidr_t)255U << 8) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_AFF1_ARM_MPIDR) */
+#define OKL4_MASK_AFF1_ARM_MPIDR ((okl4_arm_mpidr_t)255U << 8)
+/*lint -esym(621, OKL4_SHIFT_AFF1_ARM_MPIDR) */
+#define OKL4_SHIFT_AFF1_ARM_MPIDR (8)
+/*lint -esym(621, OKL4_WIDTH_AFF1_ARM_MPIDR) */
+#define OKL4_WIDTH_AFF1_ARM_MPIDR (8)
+/*lint -esym(621, OKL4_ARM_MPIDR_AFF2_MASK) */
+#define OKL4_ARM_MPIDR_AFF2_MASK ((okl4_arm_mpidr_t)255U << 16) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_AFF2_ARM_MPIDR) */
+#define OKL4_MASK_AFF2_ARM_MPIDR ((okl4_arm_mpidr_t)255U << 16)
+/*lint -esym(621, OKL4_SHIFT_AFF2_ARM_MPIDR) */
+#define OKL4_SHIFT_AFF2_ARM_MPIDR (16)
+/*lint -esym(621, OKL4_WIDTH_AFF2_ARM_MPIDR) */
+#define OKL4_WIDTH_AFF2_ARM_MPIDR (8)
+/*lint -esym(621, OKL4_ARM_MPIDR_MT_MASK) */
+#define OKL4_ARM_MPIDR_MT_MASK ((okl4_arm_mpidr_t)1U << 24) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_MT_ARM_MPIDR) */
+#define OKL4_MASK_MT_ARM_MPIDR ((okl4_arm_mpidr_t)1U << 24)
+/*lint -esym(621, OKL4_SHIFT_MT_ARM_MPIDR) */
+#define OKL4_SHIFT_MT_ARM_MPIDR (24)
+/*lint -esym(621, OKL4_WIDTH_MT_ARM_MPIDR) */
+#define OKL4_WIDTH_MT_ARM_MPIDR (1)
+/*lint -esym(621, OKL4_ARM_MPIDR_U_MASK) */
+#define OKL4_ARM_MPIDR_U_MASK ((okl4_arm_mpidr_t)1U << 30) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_U_ARM_MPIDR) */
+#define OKL4_MASK_U_ARM_MPIDR ((okl4_arm_mpidr_t)1U << 30)
+/*lint -esym(621, OKL4_SHIFT_U_ARM_MPIDR) */
+#define OKL4_SHIFT_U_ARM_MPIDR (30)
+/*lint -esym(621, OKL4_WIDTH_U_ARM_MPIDR) */
+#define OKL4_WIDTH_U_ARM_MPIDR (1)
+/*lint -esym(621, OKL4_ARM_MPIDR_MP_MASK) */
+#define OKL4_ARM_MPIDR_MP_MASK ((okl4_arm_mpidr_t)1U << 31) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_MP_ARM_MPIDR) */
+#define OKL4_MASK_MP_ARM_MPIDR ((okl4_arm_mpidr_t)1U << 31)
+/*lint -esym(621, OKL4_SHIFT_MP_ARM_MPIDR) */
+#define OKL4_SHIFT_MP_ARM_MPIDR (31)
+/*lint -esym(621, OKL4_WIDTH_MP_ARM_MPIDR) */
+#define OKL4_WIDTH_MP_ARM_MPIDR (1)
+/*lint -esym(621, OKL4_ARM_MPIDR_AFF3_MASK) */
+#define OKL4_ARM_MPIDR_AFF3_MASK ((okl4_arm_mpidr_t)255U << 32) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_AFF3_ARM_MPIDR) */
+#define OKL4_MASK_AFF3_ARM_MPIDR ((okl4_arm_mpidr_t)255U << 32)
+/*lint -esym(621, OKL4_SHIFT_AFF3_ARM_MPIDR) */
+#define OKL4_SHIFT_AFF3_ARM_MPIDR (32)
+/*lint -esym(621, OKL4_WIDTH_AFF3_ARM_MPIDR) */
+#define OKL4_WIDTH_AFF3_ARM_MPIDR (8)
+
+
+/*lint -sem(okl4_arm_mpidr_getaff0, 1p, @n >= 0 && @n <= 255) */
+/*lint -esym(621, okl4_arm_mpidr_getaff0) */
+/*lint -esym(714, okl4_arm_mpidr_getaff0) */
+OKL4_FORCE_INLINE uint64_t
+okl4_arm_mpidr_getaff0(const okl4_arm_mpidr_t *x)
+{
+    uint64_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t field : 8;
+        } bits;
+        okl4_arm_mpidr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint64_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_mpidr_setaff0, 2n >= 0 && 2n <= 255) */
+/*lint -esym(714, okl4_arm_mpidr_setaff0) */
+
+/*lint -esym(621, okl4_arm_mpidr_setaff0) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_setaff0(okl4_arm_mpidr_t *x, uint64_t _aff0)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t field : 8;
+        } bits;
+        okl4_arm_mpidr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint64_t)_aff0;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_mpidr_getaff1, 1p, @n >= 0 && @n <= 255) */
+/*lint -esym(621, okl4_arm_mpidr_getaff1) */
+/*lint -esym(714, okl4_arm_mpidr_getaff1) */
+OKL4_FORCE_INLINE uint64_t
+okl4_arm_mpidr_getaff1(const okl4_arm_mpidr_t *x)
+{
+    uint64_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 8;
+            uint64_t field : 8;
+        } bits;
+        okl4_arm_mpidr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint64_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_mpidr_setaff1, 2n >= 0 && 2n <= 255) */
+/*lint -esym(714, okl4_arm_mpidr_setaff1) */
+
+/*lint -esym(621, okl4_arm_mpidr_setaff1) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_setaff1(okl4_arm_mpidr_t *x, uint64_t _aff1)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 8;
+            uint64_t field : 8;
+        } bits;
+        okl4_arm_mpidr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint64_t)_aff1;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_mpidr_getaff2, 1p, @n >= 0 && @n <= 255) */
+/*lint -esym(621, okl4_arm_mpidr_getaff2) */
+/*lint -esym(714, okl4_arm_mpidr_getaff2) */
+OKL4_FORCE_INLINE uint64_t
+okl4_arm_mpidr_getaff2(const okl4_arm_mpidr_t *x)
+{
+    uint64_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 16;
+            uint64_t field : 8;
+        } bits;
+        okl4_arm_mpidr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint64_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_mpidr_setaff2, 2n >= 0 && 2n <= 255) */
+/*lint -esym(714, okl4_arm_mpidr_setaff2) */
+
+/*lint -esym(621, okl4_arm_mpidr_setaff2) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_setaff2(okl4_arm_mpidr_t *x, uint64_t _aff2)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 16;
+            uint64_t field : 8;
+        } bits;
+        okl4_arm_mpidr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint64_t)_aff2;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_mpidr_getmt, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_mpidr_getmt) */
+/*lint -esym(714, okl4_arm_mpidr_getmt) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_mpidr_getmt(const okl4_arm_mpidr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 24;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_mpidr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_mpidr_setmt, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_mpidr_setmt) */
+
+/*lint -esym(621, okl4_arm_mpidr_setmt) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_setmt(okl4_arm_mpidr_t *x, okl4_bool_t _mt)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 24;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_mpidr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_mt;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_mpidr_getu, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_mpidr_getu) */
+/*lint -esym(714, okl4_arm_mpidr_getu) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_mpidr_getu(const okl4_arm_mpidr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 30;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_mpidr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_mpidr_setu, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_mpidr_setu) */
+
+/*lint -esym(621, okl4_arm_mpidr_setu) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_setu(okl4_arm_mpidr_t *x, okl4_bool_t _u)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 30;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_mpidr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_u;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_mpidr_getmp, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_mpidr_getmp) */
+/*lint -esym(714, okl4_arm_mpidr_getmp) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_mpidr_getmp(const okl4_arm_mpidr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 31;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_mpidr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_mpidr_getaff3, 1p, @n >= 0 && @n <= 255) */
+/*lint -esym(621, okl4_arm_mpidr_getaff3) */
+/*lint -esym(714, okl4_arm_mpidr_getaff3) */
+OKL4_FORCE_INLINE uint64_t
+okl4_arm_mpidr_getaff3(const okl4_arm_mpidr_t *x)
+{
+    uint64_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 32;
+            uint64_t field : 8;
+        } bits;
+        okl4_arm_mpidr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint64_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_mpidr_setaff3, 2n >= 0 && 2n <= 255) */
+/*lint -esym(714, okl4_arm_mpidr_setaff3) */
+
+/*lint -esym(621, okl4_arm_mpidr_setaff3) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_setaff3(okl4_arm_mpidr_t *x, uint64_t _aff3)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 32;
+            uint64_t field : 8;
+        } bits;
+        okl4_arm_mpidr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint64_t)_aff3;
+    *x = _conv.raw;
+}
+/*lint -esym(714, okl4_arm_mpidr_init) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_init(okl4_arm_mpidr_t *x)
+{
+    *x = (okl4_arm_mpidr_t)2147483648U;
+}
+
+/*lint -esym(714, okl4_arm_mpidr_cast) */
+OKL4_FORCE_INLINE okl4_arm_mpidr_t
+okl4_arm_mpidr_cast(uint64_t p, okl4_bool_t force)
+{
+    okl4_arm_mpidr_t x = (okl4_arm_mpidr_t)p;
+    if (force) {
+        x &= ~(okl4_arm_mpidr_t)0x80000000U;
+        x |= (okl4_arm_mpidr_t)0x80000000U; /* x.mp */
+    }
+    return x;
+}
+
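+/**
+ * Example: a minimal usage sketch composing an MPIDR value with the accessors
+ * above. okl4_arm_mpidr_init() sets the default value (MP bit set); the
+ * affinity fields are then filled in for an assumed core 1 in cluster 0.
+ *
+ * @code
+ * okl4_arm_mpidr_t mpidr;
+ *
+ * okl4_arm_mpidr_init(&mpidr);
+ * okl4_arm_mpidr_setaff0(&mpidr, 1U);   // core 1 (assumed)
+ * okl4_arm_mpidr_setaff1(&mpidr, 0U);   // cluster 0 (assumed)
+ *
+ * uint64_t core = okl4_arm_mpidr_getaff0(&mpidr);
+ * @endcode
+ */
+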
+
+
+
+/*lint -esym(621, OKL4_AXON_NUM_RECEIVE_QUEUES) */
+#define OKL4_AXON_NUM_RECEIVE_QUEUES ((uint32_t)(4U))
+
+/*lint -esym(621, OKL4_AXON_NUM_SEND_QUEUES) */
+#define OKL4_AXON_NUM_SEND_QUEUES ((uint32_t)(4U))
+
+/*lint -esym(621, _OKL4_POISON) */
+#define _OKL4_POISON ((uint32_t)(3735928559U))
+
+/*lint -esym(621, OKL4_TRACEBUFFER_INVALID_REF) */
+#define OKL4_TRACEBUFFER_INVALID_REF ((uint32_t)(0xffffffffU))
+
+
+
+
+typedef uint32_t okl4_arm_psci_function_t;
+
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_PSCI_VERSION) */
+#define OKL4_ARM_PSCI_FUNCTION_PSCI_VERSION ((okl4_arm_psci_function_t)0x0U)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_CPU_SUSPEND) */
+#define OKL4_ARM_PSCI_FUNCTION_CPU_SUSPEND ((okl4_arm_psci_function_t)0x1U)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_CPU_OFF) */
+#define OKL4_ARM_PSCI_FUNCTION_CPU_OFF ((okl4_arm_psci_function_t)0x2U)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_CPU_ON) */
+#define OKL4_ARM_PSCI_FUNCTION_CPU_ON ((okl4_arm_psci_function_t)0x3U)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_AFFINITY_INFO) */
+#define OKL4_ARM_PSCI_FUNCTION_AFFINITY_INFO ((okl4_arm_psci_function_t)0x4U)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_MIGRATE) */
+#define OKL4_ARM_PSCI_FUNCTION_MIGRATE ((okl4_arm_psci_function_t)0x5U)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_MIGRATE_INFO_TYPE) */
+#define OKL4_ARM_PSCI_FUNCTION_MIGRATE_INFO_TYPE ((okl4_arm_psci_function_t)0x6U)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_MIGRATE_INFO_UP_CPU) */
+#define OKL4_ARM_PSCI_FUNCTION_MIGRATE_INFO_UP_CPU ((okl4_arm_psci_function_t)0x7U)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_SYSTEM_OFF) */
+#define OKL4_ARM_PSCI_FUNCTION_SYSTEM_OFF ((okl4_arm_psci_function_t)0x8U)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_SYSTEM_RESET) */
+#define OKL4_ARM_PSCI_FUNCTION_SYSTEM_RESET ((okl4_arm_psci_function_t)0x9U)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_PSCI_FEATURES) */
+#define OKL4_ARM_PSCI_FUNCTION_PSCI_FEATURES ((okl4_arm_psci_function_t)0xaU)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_CPU_FREEZE) */
+#define OKL4_ARM_PSCI_FUNCTION_CPU_FREEZE ((okl4_arm_psci_function_t)0xbU)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_CPU_DEFAULT_SUSPEND) */
+#define OKL4_ARM_PSCI_FUNCTION_CPU_DEFAULT_SUSPEND ((okl4_arm_psci_function_t)0xcU)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_NODE_HW_STATE) */
+#define OKL4_ARM_PSCI_FUNCTION_NODE_HW_STATE ((okl4_arm_psci_function_t)0xdU)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_SYSTEM_SUSPEND) */
+#define OKL4_ARM_PSCI_FUNCTION_SYSTEM_SUSPEND ((okl4_arm_psci_function_t)0xeU)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_PSCI_SET_SUSPEND_MODE) */
+#define OKL4_ARM_PSCI_FUNCTION_PSCI_SET_SUSPEND_MODE ((okl4_arm_psci_function_t)0xfU)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_PSCI_STAT_RESIDENCY) */
+#define OKL4_ARM_PSCI_FUNCTION_PSCI_STAT_RESIDENCY ((okl4_arm_psci_function_t)0x10U)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_PSCI_STAT_COUNT) */
+#define OKL4_ARM_PSCI_FUNCTION_PSCI_STAT_COUNT ((okl4_arm_psci_function_t)0x11U)
+
+/*lint -esym(714, okl4_arm_psci_function_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_psci_function_is_element_of(okl4_arm_psci_function_t var);
+
+
+/*lint -esym(714, okl4_arm_psci_function_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_psci_function_is_element_of(okl4_arm_psci_function_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((var == OKL4_ARM_PSCI_FUNCTION_PSCI_VERSION) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_CPU_SUSPEND) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_CPU_OFF) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_CPU_ON) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_AFFINITY_INFO) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_MIGRATE) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_MIGRATE_INFO_TYPE) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_MIGRATE_INFO_UP_CPU) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_SYSTEM_OFF) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_SYSTEM_RESET) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_PSCI_FEATURES) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_CPU_FREEZE) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_CPU_DEFAULT_SUSPEND) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_NODE_HW_STATE) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_SYSTEM_SUSPEND) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_PSCI_SET_SUSPEND_MODE) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_PSCI_STAT_RESIDENCY) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_PSCI_STAT_COUNT));
+}
+
+
+
+typedef uint32_t okl4_arm_psci_result_t;
+
+/*lint -esym(621, OKL4_ARM_PSCI_RESULT_SUCCESS) */
+#define OKL4_ARM_PSCI_RESULT_SUCCESS ((okl4_arm_psci_result_t)0x0U)
+/*lint -esym(621, OKL4_ARM_PSCI_RESULT_INVALID_ADDRESS) */
+#define OKL4_ARM_PSCI_RESULT_INVALID_ADDRESS ((okl4_arm_psci_result_t)0xfffffff7U)
+/*lint -esym(621, OKL4_ARM_PSCI_RESULT_DISABLED) */
+#define OKL4_ARM_PSCI_RESULT_DISABLED ((okl4_arm_psci_result_t)0xfffffff8U)
+/*lint -esym(621, OKL4_ARM_PSCI_RESULT_NOT_PRESENT) */
+#define OKL4_ARM_PSCI_RESULT_NOT_PRESENT ((okl4_arm_psci_result_t)0xfffffff9U)
+/*lint -esym(621, OKL4_ARM_PSCI_RESULT_INTERNAL_FAILURE) */
+#define OKL4_ARM_PSCI_RESULT_INTERNAL_FAILURE ((okl4_arm_psci_result_t)0xfffffffaU)
+/*lint -esym(621, OKL4_ARM_PSCI_RESULT_ON_PENDING) */
+#define OKL4_ARM_PSCI_RESULT_ON_PENDING ((okl4_arm_psci_result_t)0xfffffffbU)
+/*lint -esym(621, OKL4_ARM_PSCI_RESULT_ALREADY_ON) */
+#define OKL4_ARM_PSCI_RESULT_ALREADY_ON ((okl4_arm_psci_result_t)0xfffffffcU)
+/*lint -esym(621, OKL4_ARM_PSCI_RESULT_DENIED) */
+#define OKL4_ARM_PSCI_RESULT_DENIED ((okl4_arm_psci_result_t)0xfffffffdU)
+/*lint -esym(621, OKL4_ARM_PSCI_RESULT_INVALID_PARAMETERS) */
+#define OKL4_ARM_PSCI_RESULT_INVALID_PARAMETERS ((okl4_arm_psci_result_t)0xfffffffeU)
+/*lint -esym(621, OKL4_ARM_PSCI_RESULT_NOT_SUPPORTED) */
+#define OKL4_ARM_PSCI_RESULT_NOT_SUPPORTED ((okl4_arm_psci_result_t)0xffffffffU)
+
+/*lint -esym(714, okl4_arm_psci_result_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_psci_result_is_element_of(okl4_arm_psci_result_t var);
+
+
+/*lint -esym(714, okl4_arm_psci_result_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_psci_result_is_element_of(okl4_arm_psci_result_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((var == OKL4_ARM_PSCI_RESULT_SUCCESS) ||
+            (var == OKL4_ARM_PSCI_RESULT_NOT_SUPPORTED) ||
+            (var == OKL4_ARM_PSCI_RESULT_INVALID_PARAMETERS) ||
+            (var == OKL4_ARM_PSCI_RESULT_DENIED) ||
+            (var == OKL4_ARM_PSCI_RESULT_ALREADY_ON) ||
+            (var == OKL4_ARM_PSCI_RESULT_ON_PENDING) ||
+            (var == OKL4_ARM_PSCI_RESULT_INTERNAL_FAILURE) ||
+            (var == OKL4_ARM_PSCI_RESULT_NOT_PRESENT) ||
+            (var == OKL4_ARM_PSCI_RESULT_DISABLED) ||
+            (var == OKL4_ARM_PSCI_RESULT_INVALID_ADDRESS));
+}
+
+
+/**
+    - BITS 15..0 -   @ref OKL4_MASK_STATE_ID_ARM_PSCI_SUSPEND_STATE
+    - BIT 16 -   @ref OKL4_MASK_POWER_DOWN_ARM_PSCI_SUSPEND_STATE
+    - BITS 25..24 -   @ref OKL4_MASK_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE
+*/
+
+/*lint -esym(621, okl4_arm_psci_suspend_state_t) */
+typedef uint32_t okl4_arm_psci_suspend_state_t;
+
+/*lint -esym(621, okl4_arm_psci_suspend_state_getstateid) */
+/*lint -esym(714, okl4_arm_psci_suspend_state_getstateid) */
+OKL4_FORCE_INLINE uint32_t
+okl4_arm_psci_suspend_state_getstateid(const okl4_arm_psci_suspend_state_t *x);
+
+/*lint -esym(621, okl4_arm_psci_suspend_state_setstateid) */
+OKL4_FORCE_INLINE void
+okl4_arm_psci_suspend_state_setstateid(okl4_arm_psci_suspend_state_t *x, uint32_t _state_id);
+
+/*lint -esym(621, okl4_arm_psci_suspend_state_getpowerdown) */
+/*lint -esym(714, okl4_arm_psci_suspend_state_getpowerdown) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_psci_suspend_state_getpowerdown(const okl4_arm_psci_suspend_state_t *x);
+
+/*lint -esym(621, okl4_arm_psci_suspend_state_setpowerdown) */
+OKL4_FORCE_INLINE void
+okl4_arm_psci_suspend_state_setpowerdown(okl4_arm_psci_suspend_state_t *x, okl4_bool_t _power_down);
+
+/*lint -esym(621, okl4_arm_psci_suspend_state_getpowerlevel) */
+/*lint -esym(714, okl4_arm_psci_suspend_state_getpowerlevel) */
+OKL4_FORCE_INLINE uint32_t
+okl4_arm_psci_suspend_state_getpowerlevel(const okl4_arm_psci_suspend_state_t *x);
+
+/*lint -esym(621, okl4_arm_psci_suspend_state_setpowerlevel) */
+OKL4_FORCE_INLINE void
+okl4_arm_psci_suspend_state_setpowerlevel(okl4_arm_psci_suspend_state_t *x, uint32_t _power_level);
+
+/*lint -esym(714, okl4_arm_psci_suspend_state_init) */
+OKL4_FORCE_INLINE void
+okl4_arm_psci_suspend_state_init(okl4_arm_psci_suspend_state_t *x);
+
+/*lint -esym(714, okl4_arm_psci_suspend_state_cast) */
+OKL4_FORCE_INLINE okl4_arm_psci_suspend_state_t
+okl4_arm_psci_suspend_state_cast(uint32_t p, okl4_bool_t force);
+
+
+
+/*lint -esym(621, OKL4_ARM_PSCI_POWER_LEVEL_CPU) */
+#define OKL4_ARM_PSCI_POWER_LEVEL_CPU ((okl4_arm_psci_suspend_state_t)(0U))
+
+/*lint -esym(621, OKL4_ARM_PSCI_SUSPEND_STATE_STATE_ID_MASK) */
+#define OKL4_ARM_PSCI_SUSPEND_STATE_STATE_ID_MASK ((okl4_arm_psci_suspend_state_t)65535U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_STATE_ID_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_MASK_STATE_ID_ARM_PSCI_SUSPEND_STATE ((okl4_arm_psci_suspend_state_t)65535U)
+/*lint -esym(621, OKL4_SHIFT_STATE_ID_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_SHIFT_STATE_ID_ARM_PSCI_SUSPEND_STATE (0)
+/*lint -esym(621, OKL4_WIDTH_STATE_ID_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_WIDTH_STATE_ID_ARM_PSCI_SUSPEND_STATE (16)
+/*lint -esym(621, OKL4_ARM_PSCI_SUSPEND_STATE_POWER_DOWN_MASK) */
+#define OKL4_ARM_PSCI_SUSPEND_STATE_POWER_DOWN_MASK ((okl4_arm_psci_suspend_state_t)1U << 16) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_POWER_DOWN_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_MASK_POWER_DOWN_ARM_PSCI_SUSPEND_STATE ((okl4_arm_psci_suspend_state_t)1U << 16)
+/*lint -esym(621, OKL4_SHIFT_POWER_DOWN_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_SHIFT_POWER_DOWN_ARM_PSCI_SUSPEND_STATE (16)
+/*lint -esym(621, OKL4_WIDTH_POWER_DOWN_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_WIDTH_POWER_DOWN_ARM_PSCI_SUSPEND_STATE (1)
+/*lint -esym(621, OKL4_ARM_PSCI_SUSPEND_STATE_POWER_LEVEL_MASK) */
+#define OKL4_ARM_PSCI_SUSPEND_STATE_POWER_LEVEL_MASK ((okl4_arm_psci_suspend_state_t)3U << 24) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_MASK_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE ((okl4_arm_psci_suspend_state_t)3U << 24)
+/*lint -esym(621, OKL4_SHIFT_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_SHIFT_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE (24)
+/*lint -esym(621, OKL4_WIDTH_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_WIDTH_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE (2)
+
+
+/*lint -sem(okl4_arm_psci_suspend_state_getstateid, 1p, @n >= 0 && @n <= 65535) */
+/*lint -esym(621, okl4_arm_psci_suspend_state_getstateid) */
+/*lint -esym(714, okl4_arm_psci_suspend_state_getstateid) */
+OKL4_FORCE_INLINE uint32_t
+okl4_arm_psci_suspend_state_getstateid(const okl4_arm_psci_suspend_state_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 16;
+        } bits;
+        okl4_arm_psci_suspend_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_psci_suspend_state_setstateid, 2n >= 0 && 2n <= 65535) */
+/*lint -esym(714, okl4_arm_psci_suspend_state_setstateid) */
+
+/*lint -esym(621, okl4_arm_psci_suspend_state_setstateid) */
+OKL4_FORCE_INLINE void
+okl4_arm_psci_suspend_state_setstateid(okl4_arm_psci_suspend_state_t *x, uint32_t _state_id)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 16;
+        } bits;
+        okl4_arm_psci_suspend_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_state_id;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_psci_suspend_state_getpowerdown, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_psci_suspend_state_getpowerdown) */
+/*lint -esym(714, okl4_arm_psci_suspend_state_getpowerdown) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_psci_suspend_state_getpowerdown(const okl4_arm_psci_suspend_state_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 16;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_psci_suspend_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_psci_suspend_state_setpowerdown, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_psci_suspend_state_setpowerdown) */
+
+/*lint -esym(621, okl4_arm_psci_suspend_state_setpowerdown) */
+OKL4_FORCE_INLINE void
+okl4_arm_psci_suspend_state_setpowerdown(okl4_arm_psci_suspend_state_t *x, okl4_bool_t _power_down)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 16;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_psci_suspend_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_power_down;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_psci_suspend_state_getpowerlevel, 1p, @n >= 0 && @n <= 3) */
+/*lint -esym(621, okl4_arm_psci_suspend_state_getpowerlevel) */
+/*lint -esym(714, okl4_arm_psci_suspend_state_getpowerlevel) */
+OKL4_FORCE_INLINE uint32_t
+okl4_arm_psci_suspend_state_getpowerlevel(const okl4_arm_psci_suspend_state_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 24;
+            uint32_t field : 2;
+        } bits;
+        okl4_arm_psci_suspend_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_psci_suspend_state_setpowerlevel, 2n >= 0 && 2n <= 3) */
+/*lint -esym(714, okl4_arm_psci_suspend_state_setpowerlevel) */
+
+/*lint -esym(621, okl4_arm_psci_suspend_state_setpowerlevel) */
+OKL4_FORCE_INLINE void
+okl4_arm_psci_suspend_state_setpowerlevel(okl4_arm_psci_suspend_state_t *x, uint32_t _power_level)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 24;
+            uint32_t field : 2;
+        } bits;
+        okl4_arm_psci_suspend_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_power_level;
+    *x = _conv.raw;
+}
+/*lint -esym(714, okl4_arm_psci_suspend_state_init) */
+OKL4_FORCE_INLINE void
+okl4_arm_psci_suspend_state_init(okl4_arm_psci_suspend_state_t *x)
+{
+    *x = (okl4_arm_psci_suspend_state_t)0U;
+}
+
+/*lint -esym(714, okl4_arm_psci_suspend_state_cast) */
+OKL4_FORCE_INLINE okl4_arm_psci_suspend_state_t
+okl4_arm_psci_suspend_state_cast(uint32_t p, okl4_bool_t force)
+{
+    okl4_arm_psci_suspend_state_t x = (okl4_arm_psci_suspend_state_t)p;
+    (void)force;
+    return x;
+}
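+
+/*
+ * Example (editorial sketch, not part of the generated interface): composing
+ * a PSCI CPU_SUSPEND state argument with the accessors above. The state-id
+ * value 0x2 is an arbitrary, platform-specific placeholder.
+ *
+ *     okl4_arm_psci_suspend_state_t state;
+ *     okl4_arm_psci_suspend_state_init(&state);
+ *     okl4_arm_psci_suspend_state_setstateid(&state, 0x2U);
+ *     okl4_arm_psci_suspend_state_setpowerdown(&state, (okl4_bool_t)1);
+ *     okl4_arm_psci_suspend_state_setpowerlevel(&state, 1U);
+ */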
+
+
+
+/**
+    - BIT 0 -   @ref OKL4_MASK_MMU_ENABLE_ARM_SCTLR
+    - BIT 1 -   @ref OKL4_MASK_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR
+    - BIT 2 -   @ref OKL4_MASK_DATA_CACHE_ENABLE_ARM_SCTLR
+    - BIT 3 -   @ref OKL4_MASK_STACK_ALIGN_ARM_SCTLR
+    - BIT 4 -   @ref OKL4_MASK_STACK_ALIGN_EL0_ARM_SCTLR
+    - BIT 5 -   @ref OKL4_MASK_CP15_BARRIER_ENABLE_ARM_SCTLR
+    - BIT 6 -   @ref OKL4_MASK_OKL_HCR_EL2_DC_ARM_SCTLR
+    - BIT 7 -   @ref OKL4_MASK_IT_DISABLE_ARM_SCTLR
+    - BIT 8 -   @ref OKL4_MASK_SETEND_DISABLE_ARM_SCTLR
+    - BIT 9 -   @ref OKL4_MASK_USER_MASK_ACCESS_ARM_SCTLR
+    - BIT 11 -   @ref OKL4_MASK_RESERVED11_ARM_SCTLR
+    - BIT 12 -   @ref OKL4_MASK_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR
+    - BIT 13 -   @ref OKL4_MASK_VECTORS_BIT_ARM_SCTLR
+    - BIT 14 -   @ref OKL4_MASK_DCACHE_ZERO_ARM_SCTLR
+    - BIT 15 -   @ref OKL4_MASK_USER_CACHE_TYPE_ARM_SCTLR
+    - BIT 16 -   @ref OKL4_MASK_NO_TRAP_WFI_ARM_SCTLR
+    - BIT 18 -   @ref OKL4_MASK_NO_TRAP_WFE_ARM_SCTLR
+    - BIT 19 -   @ref OKL4_MASK_WRITE_EXEC_NEVER_ARM_SCTLR
+    - BIT 20 -   @ref OKL4_MASK_USER_WRITE_EXEC_NEVER_ARM_SCTLR
+    - BIT 22 -   @ref OKL4_MASK_RESERVED22_ARM_SCTLR
+    - BIT 23 -   @ref OKL4_MASK_RESERVED23_ARM_SCTLR
+    - BIT 24 -   @ref OKL4_MASK_EL0_ENDIANNESS_ARM_SCTLR
+    - BIT 25 -   @ref OKL4_MASK_EXCEPTION_ENDIANNESS_ARM_SCTLR
+    - BIT 28 -   @ref OKL4_MASK_TEX_REMAP_ENABLE_ARM_SCTLR
+    - BIT 29 -   @ref OKL4_MASK_ACCESS_FLAG_ENABLE_ARM_SCTLR
+    - BIT 30 -   @ref OKL4_MASK_THUMB_EXCEPTION_ENABLE_ARM_SCTLR
+*/
+
+/*lint -esym(621, okl4_arm_sctlr_t) */
+typedef uint32_t okl4_arm_sctlr_t;
+
+/*lint -esym(621, okl4_arm_sctlr_getmmuenable) */
+/*lint -esym(714, okl4_arm_sctlr_getmmuenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getmmuenable(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setmmuenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setmmuenable(okl4_arm_sctlr_t *x, okl4_bool_t _mmu_enable);
+
+/*lint -esym(621, okl4_arm_sctlr_getalignmentcheckenable) */
+/*lint -esym(714, okl4_arm_sctlr_getalignmentcheckenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getalignmentcheckenable(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setalignmentcheckenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setalignmentcheckenable(okl4_arm_sctlr_t *x, okl4_bool_t _alignment_check_enable);
+
+/*lint -esym(621, okl4_arm_sctlr_getdatacacheenable) */
+/*lint -esym(714, okl4_arm_sctlr_getdatacacheenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getdatacacheenable(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setdatacacheenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setdatacacheenable(okl4_arm_sctlr_t *x, okl4_bool_t _data_cache_enable);
+
+/*lint -esym(621, okl4_arm_sctlr_getinstructioncacheenable) */
+/*lint -esym(714, okl4_arm_sctlr_getinstructioncacheenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getinstructioncacheenable(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setinstructioncacheenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setinstructioncacheenable(okl4_arm_sctlr_t *x, okl4_bool_t _instruction_cache_enable);
+
+/*lint -esym(621, okl4_arm_sctlr_getcp15barrierenable) */
+/*lint -esym(714, okl4_arm_sctlr_getcp15barrierenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getcp15barrierenable(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setcp15barrierenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setcp15barrierenable(okl4_arm_sctlr_t *x, okl4_bool_t _cp15_barrier_enable);
+
+/*lint -esym(621, okl4_arm_sctlr_getitdisable) */
+/*lint -esym(714, okl4_arm_sctlr_getitdisable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getitdisable(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setitdisable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setitdisable(okl4_arm_sctlr_t *x, okl4_bool_t _it_disable);
+
+/*lint -esym(621, okl4_arm_sctlr_getsetenddisable) */
+/*lint -esym(714, okl4_arm_sctlr_getsetenddisable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getsetenddisable(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setsetenddisable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setsetenddisable(okl4_arm_sctlr_t *x, okl4_bool_t _setend_disable);
+
+/*lint -esym(621, okl4_arm_sctlr_getreserved11) */
+/*lint -esym(714, okl4_arm_sctlr_getreserved11) */
+OKL4_FORCE_INLINE uint32_t
+okl4_arm_sctlr_getreserved11(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_getnotrapwfi) */
+/*lint -esym(714, okl4_arm_sctlr_getnotrapwfi) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getnotrapwfi(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setnotrapwfi) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setnotrapwfi(okl4_arm_sctlr_t *x, okl4_bool_t _no_trap_wfi);
+
+/*lint -esym(621, okl4_arm_sctlr_getnotrapwfe) */
+/*lint -esym(714, okl4_arm_sctlr_getnotrapwfe) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getnotrapwfe(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setnotrapwfe) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setnotrapwfe(okl4_arm_sctlr_t *x, okl4_bool_t _no_trap_wfe);
+
+/*lint -esym(621, okl4_arm_sctlr_getwriteexecnever) */
+/*lint -esym(714, okl4_arm_sctlr_getwriteexecnever) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getwriteexecnever(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setwriteexecnever) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setwriteexecnever(okl4_arm_sctlr_t *x, okl4_bool_t _write_exec_never);
+
+/*lint -esym(621, okl4_arm_sctlr_getreserved22) */
+/*lint -esym(714, okl4_arm_sctlr_getreserved22) */
+OKL4_FORCE_INLINE uint32_t
+okl4_arm_sctlr_getreserved22(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_getreserved23) */
+/*lint -esym(714, okl4_arm_sctlr_getreserved23) */
+OKL4_FORCE_INLINE uint32_t
+okl4_arm_sctlr_getreserved23(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_getel0endianness) */
+/*lint -esym(714, okl4_arm_sctlr_getel0endianness) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getel0endianness(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setel0endianness) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setel0endianness(okl4_arm_sctlr_t *x, okl4_bool_t _el0_endianness);
+
+/*lint -esym(621, okl4_arm_sctlr_getexceptionendianness) */
+/*lint -esym(714, okl4_arm_sctlr_getexceptionendianness) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getexceptionendianness(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setexceptionendianness) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setexceptionendianness(okl4_arm_sctlr_t *x, okl4_bool_t _exception_endianness);
+
+/*lint -esym(621, okl4_arm_sctlr_getvectorsbit) */
+/*lint -esym(714, okl4_arm_sctlr_getvectorsbit) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getvectorsbit(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setvectorsbit) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setvectorsbit(okl4_arm_sctlr_t *x, okl4_bool_t _vectors_bit);
+
+/*lint -esym(621, okl4_arm_sctlr_getuserwriteexecnever) */
+/*lint -esym(714, okl4_arm_sctlr_getuserwriteexecnever) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getuserwriteexecnever(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setuserwriteexecnever) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setuserwriteexecnever(okl4_arm_sctlr_t *x, okl4_bool_t _user_write_exec_never);
+
+/*lint -esym(621, okl4_arm_sctlr_gettexremapenable) */
+/*lint -esym(714, okl4_arm_sctlr_gettexremapenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_gettexremapenable(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_settexremapenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_settexremapenable(okl4_arm_sctlr_t *x, okl4_bool_t _tex_remap_enable);
+
+/*lint -esym(621, okl4_arm_sctlr_getaccessflagenable) */
+/*lint -esym(714, okl4_arm_sctlr_getaccessflagenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getaccessflagenable(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setaccessflagenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setaccessflagenable(okl4_arm_sctlr_t *x, okl4_bool_t _access_flag_enable);
+
+/*lint -esym(621, okl4_arm_sctlr_getthumbexceptionenable) */
+/*lint -esym(714, okl4_arm_sctlr_getthumbexceptionenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getthumbexceptionenable(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setthumbexceptionenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setthumbexceptionenable(okl4_arm_sctlr_t *x, okl4_bool_t _thumb_exception_enable);
+
+/*lint -esym(621, okl4_arm_sctlr_getstackalign) */
+/*lint -esym(714, okl4_arm_sctlr_getstackalign) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getstackalign(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setstackalign) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setstackalign(okl4_arm_sctlr_t *x, okl4_bool_t _stack_align);
+
+/*lint -esym(621, okl4_arm_sctlr_getstackalignel0) */
+/*lint -esym(714, okl4_arm_sctlr_getstackalignel0) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getstackalignel0(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setstackalignel0) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setstackalignel0(okl4_arm_sctlr_t *x, okl4_bool_t _stack_align_el0);
+
+/*lint -esym(621, okl4_arm_sctlr_getusermaskaccess) */
+/*lint -esym(714, okl4_arm_sctlr_getusermaskaccess) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getusermaskaccess(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setusermaskaccess) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setusermaskaccess(okl4_arm_sctlr_t *x, okl4_bool_t _user_mask_access);
+
+/*lint -esym(621, okl4_arm_sctlr_getdcachezero) */
+/*lint -esym(714, okl4_arm_sctlr_getdcachezero) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getdcachezero(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setdcachezero) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setdcachezero(okl4_arm_sctlr_t *x, okl4_bool_t _dcache_zero);
+
+/*lint -esym(621, okl4_arm_sctlr_getusercachetype) */
+/*lint -esym(714, okl4_arm_sctlr_getusercachetype) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getusercachetype(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setusercachetype) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setusercachetype(okl4_arm_sctlr_t *x, okl4_bool_t _user_cache_type);
+
+/*lint -esym(621, okl4_arm_sctlr_getoklhcrel2dc) */
+/*lint -esym(714, okl4_arm_sctlr_getoklhcrel2dc) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getoklhcrel2dc(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setoklhcrel2dc) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setoklhcrel2dc(okl4_arm_sctlr_t *x, okl4_bool_t _okl_hcr_el2_dc);
+
+/*lint -esym(714, okl4_arm_sctlr_init) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_init(okl4_arm_sctlr_t *x);
+
+/*lint -esym(714, okl4_arm_sctlr_cast) */
+OKL4_FORCE_INLINE okl4_arm_sctlr_t
+okl4_arm_sctlr_cast(uint32_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_ARM_SCTLR_MMU_ENABLE_MASK) */
+#define OKL4_ARM_SCTLR_MMU_ENABLE_MASK ((okl4_arm_sctlr_t)1U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_MMU_ENABLE_ARM_SCTLR) */
+#define OKL4_MASK_MMU_ENABLE_ARM_SCTLR ((okl4_arm_sctlr_t)1U)
+/*lint -esym(621, OKL4_SHIFT_MMU_ENABLE_ARM_SCTLR) */
+#define OKL4_SHIFT_MMU_ENABLE_ARM_SCTLR (0)
+/*lint -esym(621, OKL4_WIDTH_MMU_ENABLE_ARM_SCTLR) */
+#define OKL4_WIDTH_MMU_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_ALIGNMENT_CHECK_ENABLE_MASK) */
+#define OKL4_ARM_SCTLR_ALIGNMENT_CHECK_ENABLE_MASK ((okl4_arm_sctlr_t)1U << 1) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR) */
+#define OKL4_MASK_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 1)
+/*lint -esym(621, OKL4_SHIFT_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR) */
+#define OKL4_SHIFT_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_WIDTH_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR) */
+#define OKL4_WIDTH_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_DATA_CACHE_ENABLE_MASK) */
+#define OKL4_ARM_SCTLR_DATA_CACHE_ENABLE_MASK ((okl4_arm_sctlr_t)1U << 2) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_DATA_CACHE_ENABLE_ARM_SCTLR) */
+#define OKL4_MASK_DATA_CACHE_ENABLE_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 2)
+/*lint -esym(621, OKL4_SHIFT_DATA_CACHE_ENABLE_ARM_SCTLR) */
+#define OKL4_SHIFT_DATA_CACHE_ENABLE_ARM_SCTLR (2)
+/*lint -esym(621, OKL4_WIDTH_DATA_CACHE_ENABLE_ARM_SCTLR) */
+#define OKL4_WIDTH_DATA_CACHE_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_STACK_ALIGN_MASK) */
+#define OKL4_ARM_SCTLR_STACK_ALIGN_MASK ((okl4_arm_sctlr_t)1U << 3) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_STACK_ALIGN_ARM_SCTLR) */
+#define OKL4_MASK_STACK_ALIGN_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 3)
+/*lint -esym(621, OKL4_SHIFT_STACK_ALIGN_ARM_SCTLR) */
+#define OKL4_SHIFT_STACK_ALIGN_ARM_SCTLR (3)
+/*lint -esym(621, OKL4_WIDTH_STACK_ALIGN_ARM_SCTLR) */
+#define OKL4_WIDTH_STACK_ALIGN_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_STACK_ALIGN_EL0_MASK) */
+#define OKL4_ARM_SCTLR_STACK_ALIGN_EL0_MASK ((okl4_arm_sctlr_t)1U << 4) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_STACK_ALIGN_EL0_ARM_SCTLR) */
+#define OKL4_MASK_STACK_ALIGN_EL0_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 4)
+/*lint -esym(621, OKL4_SHIFT_STACK_ALIGN_EL0_ARM_SCTLR) */
+#define OKL4_SHIFT_STACK_ALIGN_EL0_ARM_SCTLR (4)
+/*lint -esym(621, OKL4_WIDTH_STACK_ALIGN_EL0_ARM_SCTLR) */
+#define OKL4_WIDTH_STACK_ALIGN_EL0_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_CP15_BARRIER_ENABLE_MASK) */
+#define OKL4_ARM_SCTLR_CP15_BARRIER_ENABLE_MASK ((okl4_arm_sctlr_t)1U << 5) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_CP15_BARRIER_ENABLE_ARM_SCTLR) */
+#define OKL4_MASK_CP15_BARRIER_ENABLE_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 5)
+/*lint -esym(621, OKL4_SHIFT_CP15_BARRIER_ENABLE_ARM_SCTLR) */
+#define OKL4_SHIFT_CP15_BARRIER_ENABLE_ARM_SCTLR (5)
+/*lint -esym(621, OKL4_WIDTH_CP15_BARRIER_ENABLE_ARM_SCTLR) */
+#define OKL4_WIDTH_CP15_BARRIER_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_OKL_HCR_EL2_DC_MASK) */
+#define OKL4_ARM_SCTLR_OKL_HCR_EL2_DC_MASK ((okl4_arm_sctlr_t)1U << 6) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_OKL_HCR_EL2_DC_ARM_SCTLR) */
+#define OKL4_MASK_OKL_HCR_EL2_DC_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 6)
+/*lint -esym(621, OKL4_SHIFT_OKL_HCR_EL2_DC_ARM_SCTLR) */
+#define OKL4_SHIFT_OKL_HCR_EL2_DC_ARM_SCTLR (6)
+/*lint -esym(621, OKL4_WIDTH_OKL_HCR_EL2_DC_ARM_SCTLR) */
+#define OKL4_WIDTH_OKL_HCR_EL2_DC_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_IT_DISABLE_MASK) */
+#define OKL4_ARM_SCTLR_IT_DISABLE_MASK ((okl4_arm_sctlr_t)1U << 7) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_IT_DISABLE_ARM_SCTLR) */
+#define OKL4_MASK_IT_DISABLE_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 7)
+/*lint -esym(621, OKL4_SHIFT_IT_DISABLE_ARM_SCTLR) */
+#define OKL4_SHIFT_IT_DISABLE_ARM_SCTLR (7)
+/*lint -esym(621, OKL4_WIDTH_IT_DISABLE_ARM_SCTLR) */
+#define OKL4_WIDTH_IT_DISABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_SETEND_DISABLE_MASK) */
+#define OKL4_ARM_SCTLR_SETEND_DISABLE_MASK ((okl4_arm_sctlr_t)1U << 8) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_SETEND_DISABLE_ARM_SCTLR) */
+#define OKL4_MASK_SETEND_DISABLE_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 8)
+/*lint -esym(621, OKL4_SHIFT_SETEND_DISABLE_ARM_SCTLR) */
+#define OKL4_SHIFT_SETEND_DISABLE_ARM_SCTLR (8)
+/*lint -esym(621, OKL4_WIDTH_SETEND_DISABLE_ARM_SCTLR) */
+#define OKL4_WIDTH_SETEND_DISABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_USER_MASK_ACCESS_MASK) */
+#define OKL4_ARM_SCTLR_USER_MASK_ACCESS_MASK ((okl4_arm_sctlr_t)1U << 9) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_USER_MASK_ACCESS_ARM_SCTLR) */
+#define OKL4_MASK_USER_MASK_ACCESS_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 9)
+/*lint -esym(621, OKL4_SHIFT_USER_MASK_ACCESS_ARM_SCTLR) */
+#define OKL4_SHIFT_USER_MASK_ACCESS_ARM_SCTLR (9)
+/*lint -esym(621, OKL4_WIDTH_USER_MASK_ACCESS_ARM_SCTLR) */
+#define OKL4_WIDTH_USER_MASK_ACCESS_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_RESERVED11_MASK) */
+#define OKL4_ARM_SCTLR_RESERVED11_MASK ((okl4_arm_sctlr_t)1U << 11) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_RESERVED11_ARM_SCTLR) */
+#define OKL4_MASK_RESERVED11_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 11)
+/*lint -esym(621, OKL4_SHIFT_RESERVED11_ARM_SCTLR) */
+#define OKL4_SHIFT_RESERVED11_ARM_SCTLR (11)
+/*lint -esym(621, OKL4_WIDTH_RESERVED11_ARM_SCTLR) */
+#define OKL4_WIDTH_RESERVED11_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_INSTRUCTION_CACHE_ENABLE_MASK) */
+#define OKL4_ARM_SCTLR_INSTRUCTION_CACHE_ENABLE_MASK ((okl4_arm_sctlr_t)1U << 12) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR) */
+#define OKL4_MASK_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 12)
+/*lint -esym(621, OKL4_SHIFT_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR) */
+#define OKL4_SHIFT_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR (12)
+/*lint -esym(621, OKL4_WIDTH_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR) */
+#define OKL4_WIDTH_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_VECTORS_BIT_MASK) */
+#define OKL4_ARM_SCTLR_VECTORS_BIT_MASK ((okl4_arm_sctlr_t)1U << 13) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_VECTORS_BIT_ARM_SCTLR) */
+#define OKL4_MASK_VECTORS_BIT_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 13)
+/*lint -esym(621, OKL4_SHIFT_VECTORS_BIT_ARM_SCTLR) */
+#define OKL4_SHIFT_VECTORS_BIT_ARM_SCTLR (13)
+/*lint -esym(621, OKL4_WIDTH_VECTORS_BIT_ARM_SCTLR) */
+#define OKL4_WIDTH_VECTORS_BIT_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_DCACHE_ZERO_MASK) */
+#define OKL4_ARM_SCTLR_DCACHE_ZERO_MASK ((okl4_arm_sctlr_t)1U << 14) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_DCACHE_ZERO_ARM_SCTLR) */
+#define OKL4_MASK_DCACHE_ZERO_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 14)
+/*lint -esym(621, OKL4_SHIFT_DCACHE_ZERO_ARM_SCTLR) */
+#define OKL4_SHIFT_DCACHE_ZERO_ARM_SCTLR (14)
+/*lint -esym(621, OKL4_WIDTH_DCACHE_ZERO_ARM_SCTLR) */
+#define OKL4_WIDTH_DCACHE_ZERO_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_USER_CACHE_TYPE_MASK) */
+#define OKL4_ARM_SCTLR_USER_CACHE_TYPE_MASK ((okl4_arm_sctlr_t)1U << 15) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_USER_CACHE_TYPE_ARM_SCTLR) */
+#define OKL4_MASK_USER_CACHE_TYPE_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 15)
+/*lint -esym(621, OKL4_SHIFT_USER_CACHE_TYPE_ARM_SCTLR) */
+#define OKL4_SHIFT_USER_CACHE_TYPE_ARM_SCTLR (15)
+/*lint -esym(621, OKL4_WIDTH_USER_CACHE_TYPE_ARM_SCTLR) */
+#define OKL4_WIDTH_USER_CACHE_TYPE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_NO_TRAP_WFI_MASK) */
+#define OKL4_ARM_SCTLR_NO_TRAP_WFI_MASK ((okl4_arm_sctlr_t)1U << 16) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_NO_TRAP_WFI_ARM_SCTLR) */
+#define OKL4_MASK_NO_TRAP_WFI_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 16)
+/*lint -esym(621, OKL4_SHIFT_NO_TRAP_WFI_ARM_SCTLR) */
+#define OKL4_SHIFT_NO_TRAP_WFI_ARM_SCTLR (16)
+/*lint -esym(621, OKL4_WIDTH_NO_TRAP_WFI_ARM_SCTLR) */
+#define OKL4_WIDTH_NO_TRAP_WFI_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_NO_TRAP_WFE_MASK) */
+#define OKL4_ARM_SCTLR_NO_TRAP_WFE_MASK ((okl4_arm_sctlr_t)1U << 18) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_NO_TRAP_WFE_ARM_SCTLR) */
+#define OKL4_MASK_NO_TRAP_WFE_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 18)
+/*lint -esym(621, OKL4_SHIFT_NO_TRAP_WFE_ARM_SCTLR) */
+#define OKL4_SHIFT_NO_TRAP_WFE_ARM_SCTLR (18)
+/*lint -esym(621, OKL4_WIDTH_NO_TRAP_WFE_ARM_SCTLR) */
+#define OKL4_WIDTH_NO_TRAP_WFE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_WRITE_EXEC_NEVER_MASK) */
+#define OKL4_ARM_SCTLR_WRITE_EXEC_NEVER_MASK ((okl4_arm_sctlr_t)1U << 19) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_WRITE_EXEC_NEVER_ARM_SCTLR) */
+#define OKL4_MASK_WRITE_EXEC_NEVER_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 19)
+/*lint -esym(621, OKL4_SHIFT_WRITE_EXEC_NEVER_ARM_SCTLR) */
+#define OKL4_SHIFT_WRITE_EXEC_NEVER_ARM_SCTLR (19)
+/*lint -esym(621, OKL4_WIDTH_WRITE_EXEC_NEVER_ARM_SCTLR) */
+#define OKL4_WIDTH_WRITE_EXEC_NEVER_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_USER_WRITE_EXEC_NEVER_MASK) */
+#define OKL4_ARM_SCTLR_USER_WRITE_EXEC_NEVER_MASK ((okl4_arm_sctlr_t)1U << 20) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_USER_WRITE_EXEC_NEVER_ARM_SCTLR) */
+#define OKL4_MASK_USER_WRITE_EXEC_NEVER_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 20)
+/*lint -esym(621, OKL4_SHIFT_USER_WRITE_EXEC_NEVER_ARM_SCTLR) */
+#define OKL4_SHIFT_USER_WRITE_EXEC_NEVER_ARM_SCTLR (20)
+/*lint -esym(621, OKL4_WIDTH_USER_WRITE_EXEC_NEVER_ARM_SCTLR) */
+#define OKL4_WIDTH_USER_WRITE_EXEC_NEVER_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_RESERVED22_MASK) */
+#define OKL4_ARM_SCTLR_RESERVED22_MASK ((okl4_arm_sctlr_t)1U << 22) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_RESERVED22_ARM_SCTLR) */
+#define OKL4_MASK_RESERVED22_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 22)
+/*lint -esym(621, OKL4_SHIFT_RESERVED22_ARM_SCTLR) */
+#define OKL4_SHIFT_RESERVED22_ARM_SCTLR (22)
+/*lint -esym(621, OKL4_WIDTH_RESERVED22_ARM_SCTLR) */
+#define OKL4_WIDTH_RESERVED22_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_RESERVED23_MASK) */
+#define OKL4_ARM_SCTLR_RESERVED23_MASK ((okl4_arm_sctlr_t)1U << 23) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_RESERVED23_ARM_SCTLR) */
+#define OKL4_MASK_RESERVED23_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 23)
+/*lint -esym(621, OKL4_SHIFT_RESERVED23_ARM_SCTLR) */
+#define OKL4_SHIFT_RESERVED23_ARM_SCTLR (23)
+/*lint -esym(621, OKL4_WIDTH_RESERVED23_ARM_SCTLR) */
+#define OKL4_WIDTH_RESERVED23_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_EL0_ENDIANNESS_MASK) */
+#define OKL4_ARM_SCTLR_EL0_ENDIANNESS_MASK ((okl4_arm_sctlr_t)1U << 24) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_EL0_ENDIANNESS_ARM_SCTLR) */
+#define OKL4_MASK_EL0_ENDIANNESS_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 24)
+/*lint -esym(621, OKL4_SHIFT_EL0_ENDIANNESS_ARM_SCTLR) */
+#define OKL4_SHIFT_EL0_ENDIANNESS_ARM_SCTLR (24)
+/*lint -esym(621, OKL4_WIDTH_EL0_ENDIANNESS_ARM_SCTLR) */
+#define OKL4_WIDTH_EL0_ENDIANNESS_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_EXCEPTION_ENDIANNESS_MASK) */
+#define OKL4_ARM_SCTLR_EXCEPTION_ENDIANNESS_MASK ((okl4_arm_sctlr_t)1U << 25) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_EXCEPTION_ENDIANNESS_ARM_SCTLR) */
+#define OKL4_MASK_EXCEPTION_ENDIANNESS_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 25)
+/*lint -esym(621, OKL4_SHIFT_EXCEPTION_ENDIANNESS_ARM_SCTLR) */
+#define OKL4_SHIFT_EXCEPTION_ENDIANNESS_ARM_SCTLR (25)
+/*lint -esym(621, OKL4_WIDTH_EXCEPTION_ENDIANNESS_ARM_SCTLR) */
+#define OKL4_WIDTH_EXCEPTION_ENDIANNESS_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_TEX_REMAP_ENABLE_MASK) */
+#define OKL4_ARM_SCTLR_TEX_REMAP_ENABLE_MASK ((okl4_arm_sctlr_t)1U << 28) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_TEX_REMAP_ENABLE_ARM_SCTLR) */
+#define OKL4_MASK_TEX_REMAP_ENABLE_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 28)
+/*lint -esym(621, OKL4_SHIFT_TEX_REMAP_ENABLE_ARM_SCTLR) */
+#define OKL4_SHIFT_TEX_REMAP_ENABLE_ARM_SCTLR (28)
+/*lint -esym(621, OKL4_WIDTH_TEX_REMAP_ENABLE_ARM_SCTLR) */
+#define OKL4_WIDTH_TEX_REMAP_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_ACCESS_FLAG_ENABLE_MASK) */
+#define OKL4_ARM_SCTLR_ACCESS_FLAG_ENABLE_MASK ((okl4_arm_sctlr_t)1U << 29) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_ACCESS_FLAG_ENABLE_ARM_SCTLR) */
+#define OKL4_MASK_ACCESS_FLAG_ENABLE_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 29)
+/*lint -esym(621, OKL4_SHIFT_ACCESS_FLAG_ENABLE_ARM_SCTLR) */
+#define OKL4_SHIFT_ACCESS_FLAG_ENABLE_ARM_SCTLR (29)
+/*lint -esym(621, OKL4_WIDTH_ACCESS_FLAG_ENABLE_ARM_SCTLR) */
+#define OKL4_WIDTH_ACCESS_FLAG_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_THUMB_EXCEPTION_ENABLE_MASK) */
+#define OKL4_ARM_SCTLR_THUMB_EXCEPTION_ENABLE_MASK ((okl4_arm_sctlr_t)1U << 30) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_THUMB_EXCEPTION_ENABLE_ARM_SCTLR) */
+#define OKL4_MASK_THUMB_EXCEPTION_ENABLE_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 30)
+/*lint -esym(621, OKL4_SHIFT_THUMB_EXCEPTION_ENABLE_ARM_SCTLR) */
+#define OKL4_SHIFT_THUMB_EXCEPTION_ENABLE_ARM_SCTLR (30)
+/*lint -esym(621, OKL4_WIDTH_THUMB_EXCEPTION_ENABLE_ARM_SCTLR) */
+#define OKL4_WIDTH_THUMB_EXCEPTION_ENABLE_ARM_SCTLR (1)
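+
+/*
+ * Example (editorial sketch): the MASK/SHIFT/WIDTH macros describe the same
+ * layout used by the inline accessors below, so a field can also be read
+ * directly from a raw value. Extracting the data-cache-enable bit:
+ *
+ *     uint32_t dc = (uint32_t)((sctlr & OKL4_MASK_DATA_CACHE_ENABLE_ARM_SCTLR)
+ *                              >> OKL4_SHIFT_DATA_CACHE_ENABLE_ARM_SCTLR);
+ *
+ * The OKL4_ARM_SCTLR_*_MASK spellings above are the deprecated older names
+ * for the same masks.
+ */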
+
+
+/*lint -sem(okl4_arm_sctlr_getmmuenable, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getmmuenable) */
+/*lint -esym(714, okl4_arm_sctlr_getmmuenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getmmuenable(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setmmuenable, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setmmuenable) */
+
+/*lint -esym(621, okl4_arm_sctlr_setmmuenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setmmuenable(okl4_arm_sctlr_t *x, okl4_bool_t _mmu_enable)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_mmu_enable;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getalignmentcheckenable, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getalignmentcheckenable) */
+/*lint -esym(714, okl4_arm_sctlr_getalignmentcheckenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getalignmentcheckenable(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 1;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setalignmentcheckenable, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setalignmentcheckenable) */
+
+/*lint -esym(621, okl4_arm_sctlr_setalignmentcheckenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setalignmentcheckenable(okl4_arm_sctlr_t *x, okl4_bool_t _alignment_check_enable)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 1;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_alignment_check_enable;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getdatacacheenable, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getdatacacheenable) */
+/*lint -esym(714, okl4_arm_sctlr_getdatacacheenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getdatacacheenable(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 2;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setdatacacheenable, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setdatacacheenable) */
+
+/*lint -esym(621, okl4_arm_sctlr_setdatacacheenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setdatacacheenable(okl4_arm_sctlr_t *x, okl4_bool_t _data_cache_enable)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 2;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_data_cache_enable;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getstackalign, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getstackalign) */
+/*lint -esym(714, okl4_arm_sctlr_getstackalign) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getstackalign(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 3;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setstackalign, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setstackalign) */
+
+/*lint -esym(621, okl4_arm_sctlr_setstackalign) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setstackalign(okl4_arm_sctlr_t *x, okl4_bool_t _stack_align)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 3;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_stack_align;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getstackalignel0, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getstackalignel0) */
+/*lint -esym(714, okl4_arm_sctlr_getstackalignel0) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getstackalignel0(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 4;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setstackalignel0, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setstackalignel0) */
+
+/*lint -esym(621, okl4_arm_sctlr_setstackalignel0) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setstackalignel0(okl4_arm_sctlr_t *x, okl4_bool_t _stack_align_el0)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 4;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_stack_align_el0;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getcp15barrierenable, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getcp15barrierenable) */
+/*lint -esym(714, okl4_arm_sctlr_getcp15barrierenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getcp15barrierenable(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 5;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setcp15barrierenable, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setcp15barrierenable) */
+
+/*lint -esym(621, okl4_arm_sctlr_setcp15barrierenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setcp15barrierenable(okl4_arm_sctlr_t *x, okl4_bool_t _cp15_barrier_enable)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 5;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_cp15_barrier_enable;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getoklhcrel2dc, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getoklhcrel2dc) */
+/*lint -esym(714, okl4_arm_sctlr_getoklhcrel2dc) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getoklhcrel2dc(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 6;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setoklhcrel2dc, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setoklhcrel2dc) */
+
+/*lint -esym(621, okl4_arm_sctlr_setoklhcrel2dc) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setoklhcrel2dc(okl4_arm_sctlr_t *x, okl4_bool_t _okl_hcr_el2_dc)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 6;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_okl_hcr_el2_dc;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getitdisable, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getitdisable) */
+/*lint -esym(714, okl4_arm_sctlr_getitdisable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getitdisable(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 7;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setitdisable, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setitdisable) */
+
+/*lint -esym(621, okl4_arm_sctlr_setitdisable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setitdisable(okl4_arm_sctlr_t *x, okl4_bool_t _it_disable)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 7;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_it_disable;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getsetenddisable, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getsetenddisable) */
+/*lint -esym(714, okl4_arm_sctlr_getsetenddisable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getsetenddisable(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 8;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setsetenddisable, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setsetenddisable) */
+
+/*lint -esym(621, okl4_arm_sctlr_setsetenddisable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setsetenddisable(okl4_arm_sctlr_t *x, okl4_bool_t _setend_disable)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 8;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_setend_disable;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getusermaskaccess, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getusermaskaccess) */
+/*lint -esym(714, okl4_arm_sctlr_getusermaskaccess) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getusermaskaccess(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 9;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setusermaskaccess, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setusermaskaccess) */
+
+/*lint -esym(621, okl4_arm_sctlr_setusermaskaccess) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setusermaskaccess(okl4_arm_sctlr_t *x, okl4_bool_t _user_mask_access)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 9;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_user_mask_access;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getreserved11, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getreserved11) */
+/*lint -esym(714, okl4_arm_sctlr_getreserved11) */
+OKL4_FORCE_INLINE uint32_t
+okl4_arm_sctlr_getreserved11(const okl4_arm_sctlr_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 11;
+            uint32_t field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_getinstructioncacheenable, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getinstructioncacheenable) */
+/*lint -esym(714, okl4_arm_sctlr_getinstructioncacheenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getinstructioncacheenable(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 12;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setinstructioncacheenable, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setinstructioncacheenable) */
+
+/*lint -esym(621, okl4_arm_sctlr_setinstructioncacheenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setinstructioncacheenable(okl4_arm_sctlr_t *x, okl4_bool_t _instruction_cache_enable)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 12;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_instruction_cache_enable;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getvectorsbit, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getvectorsbit) */
+/*lint -esym(714, okl4_arm_sctlr_getvectorsbit) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getvectorsbit(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 13;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setvectorsbit, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setvectorsbit) */
+
+/*lint -esym(621, okl4_arm_sctlr_setvectorsbit) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setvectorsbit(okl4_arm_sctlr_t *x, okl4_bool_t _vectors_bit)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 13;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_vectors_bit;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getdcachezero, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getdcachezero) */
+/*lint -esym(714, okl4_arm_sctlr_getdcachezero) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getdcachezero(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 14;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setdcachezero, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setdcachezero) */
+
+/*lint -esym(621, okl4_arm_sctlr_setdcachezero) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setdcachezero(okl4_arm_sctlr_t *x, okl4_bool_t _dcache_zero)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 14;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_dcache_zero;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getusercachetype, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getusercachetype) */
+/*lint -esym(714, okl4_arm_sctlr_getusercachetype) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getusercachetype(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 15;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setusercachetype, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setusercachetype) */
+
+/*lint -esym(621, okl4_arm_sctlr_setusercachetype) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setusercachetype(okl4_arm_sctlr_t *x, okl4_bool_t _user_cache_type)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 15;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_user_cache_type;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getnotrapwfi, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getnotrapwfi) */
+/*lint -esym(714, okl4_arm_sctlr_getnotrapwfi) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getnotrapwfi(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 16;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setnotrapwfi, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setnotrapwfi) */
+
+/*lint -esym(621, okl4_arm_sctlr_setnotrapwfi) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setnotrapwfi(okl4_arm_sctlr_t *x, okl4_bool_t _no_trap_wfi)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 16;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_no_trap_wfi;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getnotrapwfe, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getnotrapwfe) */
+/*lint -esym(714, okl4_arm_sctlr_getnotrapwfe) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getnotrapwfe(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 18;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setnotrapwfe, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setnotrapwfe) */
+
+/*lint -esym(621, okl4_arm_sctlr_setnotrapwfe) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setnotrapwfe(okl4_arm_sctlr_t *x, okl4_bool_t _no_trap_wfe)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 18;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_no_trap_wfe;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getwriteexecnever, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getwriteexecnever) */
+/*lint -esym(714, okl4_arm_sctlr_getwriteexecnever) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getwriteexecnever(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 19;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setwriteexecnever, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setwriteexecnever) */
+
+/*lint -esym(621, okl4_arm_sctlr_setwriteexecnever) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setwriteexecnever(okl4_arm_sctlr_t *x, okl4_bool_t _write_exec_never)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 19;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_write_exec_never;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getuserwriteexecnever, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getuserwriteexecnever) */
+/*lint -esym(714, okl4_arm_sctlr_getuserwriteexecnever) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getuserwriteexecnever(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 20;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setuserwriteexecnever, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setuserwriteexecnever) */
+
+/*lint -esym(621, okl4_arm_sctlr_setuserwriteexecnever) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setuserwriteexecnever(okl4_arm_sctlr_t *x, okl4_bool_t _user_write_exec_never)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 20;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_user_write_exec_never;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getreserved22, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getreserved22) */
+/*lint -esym(714, okl4_arm_sctlr_getreserved22) */
+OKL4_FORCE_INLINE uint32_t
+okl4_arm_sctlr_getreserved22(const okl4_arm_sctlr_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 22;
+            uint32_t field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_getreserved23, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getreserved23) */
+/*lint -esym(714, okl4_arm_sctlr_getreserved23) */
+OKL4_FORCE_INLINE uint32_t
+okl4_arm_sctlr_getreserved23(const okl4_arm_sctlr_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 23;
+            uint32_t field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_getel0endianness, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getel0endianness) */
+/*lint -esym(714, okl4_arm_sctlr_getel0endianness) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getel0endianness(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 24;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setel0endianness, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setel0endianness) */
+
+/*lint -esym(621, okl4_arm_sctlr_setel0endianness) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setel0endianness(okl4_arm_sctlr_t *x, okl4_bool_t _el0_endianness)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 24;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_el0_endianness;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getexceptionendianness, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getexceptionendianness) */
+/*lint -esym(714, okl4_arm_sctlr_getexceptionendianness) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getexceptionendianness(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 25;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setexceptionendianness, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setexceptionendianness) */
+
+/*lint -esym(621, okl4_arm_sctlr_setexceptionendianness) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setexceptionendianness(okl4_arm_sctlr_t *x, okl4_bool_t _exception_endianness)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 25;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_exception_endianness;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_gettexremapenable, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_gettexremapenable) */
+/*lint -esym(714, okl4_arm_sctlr_gettexremapenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_gettexremapenable(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 28;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_settexremapenable, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_settexremapenable) */
+
+/*lint -esym(621, okl4_arm_sctlr_settexremapenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_settexremapenable(okl4_arm_sctlr_t *x, okl4_bool_t _tex_remap_enable)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 28;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_tex_remap_enable;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getaccessflagenable, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getaccessflagenable) */
+/*lint -esym(714, okl4_arm_sctlr_getaccessflagenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getaccessflagenable(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 29;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setaccessflagenable, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setaccessflagenable) */
+
+/*lint -esym(621, okl4_arm_sctlr_setaccessflagenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setaccessflagenable(okl4_arm_sctlr_t *x, okl4_bool_t _access_flag_enable)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 29;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_access_flag_enable;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getthumbexceptionenable, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getthumbexceptionenable) */
+/*lint -esym(714, okl4_arm_sctlr_getthumbexceptionenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getthumbexceptionenable(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 30;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setthumbexceptionenable, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setthumbexceptionenable) */
+
+/*lint -esym(621, okl4_arm_sctlr_setthumbexceptionenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setthumbexceptionenable(okl4_arm_sctlr_t *x, okl4_bool_t _thumb_exception_enable)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 30;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_thumb_exception_enable;
+    *x = _conv.raw;
+}
+/*lint -esym(714, okl4_arm_sctlr_init) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_init(okl4_arm_sctlr_t *x)
+{
+    *x = (okl4_arm_sctlr_t)12912928U; /* 0xC50920 */
+}
+
+/*lint -esym(714, okl4_arm_sctlr_cast) */
+OKL4_FORCE_INLINE okl4_arm_sctlr_t
+okl4_arm_sctlr_cast(uint32_t p, okl4_bool_t force)
+{
+    okl4_arm_sctlr_t x = (okl4_arm_sctlr_t)p;
+    if (force) {
+        x &= ~(okl4_arm_sctlr_t)0x800U;
+        x |= (okl4_arm_sctlr_t)0x800U; /* x.reserved11 */
+        x &= ~(okl4_arm_sctlr_t)0x400000U;
+        x |= (okl4_arm_sctlr_t)0x400000U; /* x.reserved22 */
+        x &= ~(okl4_arm_sctlr_t)0x800000U;
+        x |= (okl4_arm_sctlr_t)0x800000U; /* x.reserved23 */
+    }
+    return x;
+}
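+
+/*
+ * Example (editorial sketch): building an SCTLR value from the generated
+ * default and adjusting individual controls with the accessors above.
+ * okl4_arm_sctlr_init() writes 0xC50920, which leaves the reserved bits
+ * (11, 22 and 23) set, mirroring what okl4_arm_sctlr_cast(..., force)
+ * enforces.
+ *
+ *     okl4_arm_sctlr_t sctlr;
+ *     okl4_arm_sctlr_init(&sctlr);
+ *     okl4_arm_sctlr_setmmuenable(&sctlr, (okl4_bool_t)1);
+ *     okl4_arm_sctlr_setdatacacheenable(&sctlr, (okl4_bool_t)1);
+ *     okl4_arm_sctlr_setinstructioncacheenable(&sctlr, (okl4_bool_t)1);
+ */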
+
+
+
+
+typedef uint32_t okl4_arm_smccc_arch_function_t;
+
+/*lint -esym(621, OKL4_ARM_SMCCC_ARCH_FUNCTION_SMCCC_VERSION) */
+#define OKL4_ARM_SMCCC_ARCH_FUNCTION_SMCCC_VERSION ((okl4_arm_smccc_arch_function_t)0x0U)
+/*lint -esym(621, OKL4_ARM_SMCCC_ARCH_FUNCTION_ARCH_FEATURES) */
+#define OKL4_ARM_SMCCC_ARCH_FUNCTION_ARCH_FEATURES ((okl4_arm_smccc_arch_function_t)0x1U)
+/*lint -esym(621, OKL4_ARM_SMCCC_ARCH_FUNCTION_ARCH_WORKAROUND_1) */
+#define OKL4_ARM_SMCCC_ARCH_FUNCTION_ARCH_WORKAROUND_1 ((okl4_arm_smccc_arch_function_t)0x8000U)
+
+/*lint -esym(714, okl4_arm_smccc_arch_function_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_smccc_arch_function_is_element_of(okl4_arm_smccc_arch_function_t var);
+
+
+/*lint -esym(714, okl4_arm_smccc_arch_function_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_smccc_arch_function_is_element_of(okl4_arm_smccc_arch_function_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((var == OKL4_ARM_SMCCC_ARCH_FUNCTION_SMCCC_VERSION) ||
+            (var == OKL4_ARM_SMCCC_ARCH_FUNCTION_ARCH_FEATURES) ||
+            (var == OKL4_ARM_SMCCC_ARCH_FUNCTION_ARCH_WORKAROUND_1));
+}
+
+
+
+typedef uint32_t okl4_arm_smccc_result_t;
+
+/*lint -esym(621, OKL4_ARM_SMCCC_RESULT_SUCCESS) */
+#define OKL4_ARM_SMCCC_RESULT_SUCCESS ((okl4_arm_smccc_result_t)0x0U)
+/*lint -esym(621, OKL4_ARM_SMCCC_RESULT_NOT_SUPPORTED) */
+#define OKL4_ARM_SMCCC_RESULT_NOT_SUPPORTED ((okl4_arm_smccc_result_t)0xffffffffU)
+
+/*lint -esym(714, okl4_arm_smccc_result_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_smccc_result_is_element_of(okl4_arm_smccc_result_t var);
+
+
+/*lint -esym(714, okl4_arm_smccc_result_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_smccc_result_is_element_of(okl4_arm_smccc_result_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((var == OKL4_ARM_SMCCC_RESULT_SUCCESS) ||
+            (var == OKL4_ARM_SMCCC_RESULT_NOT_SUPPORTED));
+}
+
+
+/**
+    The `okl4_register_t` type represents an unsigned, machine-native
+    register-sized integer value.
+*/
+
+typedef uint64_t okl4_register_t;
+
+
+
+
+
+typedef okl4_register_t okl4_atomic_raw_register_t;
+
+
+
+
+
+
+
+
+
+typedef uint16_t okl4_atomic_raw_uint16_t;
+
+
+
+
+
+typedef uint32_t okl4_atomic_raw_uint32_t;
+
+
+
+
+
+typedef uint64_t okl4_atomic_raw_uint64_t;
+
+
+
+
+
+
+
+
+
+typedef uint8_t okl4_atomic_raw_uint8_t;
+
+
+
+
+/**
+    The okl4_atomic_register_t type implements a machine-word-sized value
+    that can be operated on using atomic operations.  This can be used
+    to implement thread-safe synchronisation primitives.
+*/
+
+struct okl4_atomic_register {
+    volatile okl4_atomic_raw_register_t value;
+};
+
+
+
+
+
+
+/**
+    The okl4_atomic_register_t type implements a machine-word-sized value
+    that can be operated on using atomic operations.  This can be used
+    to implement thread-safe synchronisation primitives.
+*/
+
+typedef struct okl4_atomic_register okl4_atomic_register_t;
+
+
+
+
+/**
+    The okl4_atomic_uint16_t type implements a 16-bit value
+    that can be operated on using atomic operations.  This can be used
+    to implement thread-safe synchronisation primitives.
+*/
+
+struct okl4_atomic_uint16 {
+    volatile okl4_atomic_raw_uint16_t value;
+};
+
+
+
+
+
+
+/**
+    The okl4_atomic_uint16_t type implements a 16-bit value
+    that can be operated on using atomic operations.  This can be used
+    to implement thread-safe synchronisation primitives.
+*/
+
+typedef struct okl4_atomic_uint16 okl4_atomic_uint16_t;
+
+
+
+
+/**
+    The okl4_atomic_uint32_t type implements a 32-bit value
+    that can be operated on using atomic operations.  This can be used
+    to implement thread-safe synchronisation primitives.
+*/
+
+struct okl4_atomic_uint32 {
+    volatile okl4_atomic_raw_uint32_t value;
+};
+
+
+
+
+
+
+/**
+    The okl4_atomic_uint32_t type implements a 32-bit value
+    that can be operated on using atomic operations.  This can be used
+    to implement thread-safe synchronisation primitives.
+*/
+
+typedef struct okl4_atomic_uint32 okl4_atomic_uint32_t;
+
+
+
+
+/**
+    The okl4_atomic_uint64_t type implements a 64-bit value
+    that can be operated on using atomic operations.  This can be used
+    to implement thread-safe synchronisation primitives.
+*/
+
+struct okl4_atomic_uint64 {
+    volatile okl4_atomic_raw_uint64_t value;
+};
+
+
+
+
+
+
+/**
+    The okl4_atomic_uint64_t type implements a 64-bit value
+    that can be operated on using atomic operations.  This can be used
+    to implement thread-safe synchronisation primitives.
+*/
+
+typedef struct okl4_atomic_uint64 okl4_atomic_uint64_t;
+
+
+
+
+/**
+    The okl4_atomic_uint8_t type implements an 8-bit value
+    that can be operated on using atomic operations.  This can be used
+    to implement thread-safe synchronisation primitives.
+*/
+
+struct okl4_atomic_uint8 {
+    volatile okl4_atomic_raw_uint8_t value;
+};
+
+
+
+
+
+
+/**
+    The okl4_atomic_uint8_t type implements an 8-bit value
+    that can be operated on using atomic operations.  This can be used
+    to implement thread-safe synchronisation primitives.
+*/
+
+typedef struct okl4_atomic_uint8 okl4_atomic_uint8_t;
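+
+/*
+ * Illustrative sketch, not part of the generated interface: one way the
+ * okl4_atomic_uint32_t wrapper could back a simple test-and-set spinlock.
+ * This assumes the GCC/Clang __atomic builtins are available; any
+ * SDK-provided atomic-operation helpers are not shown here. The
+ * example_spinlock_* names are hypothetical.
+ */
+OKL4_FORCE_INLINE void
+example_spinlock_acquire(okl4_atomic_uint32_t *lock)
+{
+    /* Spin until the previous value was 0, i.e. the lock was free. */
+    while (__atomic_exchange_n(&lock->value, 1U, __ATOMIC_ACQUIRE) != 0U) {
+        /* busy-wait */
+    }
+}
+
+OKL4_FORCE_INLINE void
+example_spinlock_release(okl4_atomic_uint32_t *lock)
+{
+    __atomic_store_n(&lock->value, 0U, __ATOMIC_RELEASE);
+}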
+
+
+
+
+/**
+    The `okl4_count_t` type represents a natural number of items or
+    iterations. This type is unsigned and cannot represent error values; use
+    `okl4_scount_t` if an error representation is required.
+*/
+
+typedef uint32_t okl4_count_t;
+
+/*lint -esym(621, OKL4_DEFAULT_PAGEBITS) */
+#define OKL4_DEFAULT_PAGEBITS ((okl4_count_t)(12U))
+
+/** The maximum segment index returned by mmu_lookup_segment. */
+/*lint -esym(621, OKL4_KMMU_LOOKUP_PAGE_SEGMENT_MASK) */
+#define OKL4_KMMU_LOOKUP_PAGE_SEGMENT_MASK ((okl4_count_t)(1023U))
+
+/** The maximum number of segment attachments to a KMMU. */
+/*lint -esym(621, OKL4_KMMU_MAX_SEGMENTS) */
+#define OKL4_KMMU_MAX_SEGMENTS ((okl4_count_t)(256U))
+
+/*lint -esym(621, OKL4_PROFILE_NO_PCPUS) */
+#define OKL4_PROFILE_NO_PCPUS ((okl4_count_t)(0xffffffffU))
+
+
+
+/**
+    The `okl4_kcap_t` type represents a kernel object capability identifier
+    (otherwise known as *designator* or *cap*) that addresses a kernel
+    capability. A capability encodes rights to perform particular operations on
+    a kernel object.
+*/
+
+typedef okl4_count_t okl4_kcap_t;
+
+/*lint -esym(621, OKL4_KCAP_INVALID) */
+#define OKL4_KCAP_INVALID ((okl4_kcap_t)(0xffffffffU))
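+
+/*
+ * Illustrative sketch, not part of the generated interface: a caller would
+ * typically compare a received capability against OKL4_KCAP_INVALID before
+ * using it. The example_kcap_is_valid name is hypothetical.
+ */
+OKL4_FORCE_INLINE okl4_bool_t
+example_kcap_is_valid(okl4_kcap_t cap)
+{
+    return (okl4_bool_t)(cap != OKL4_KCAP_INVALID);
+}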
+
+
+
+/**
+    The `okl4_interrupt_number_t` type is an index into the interrupt ID
+    space. For platforms with a single simple interrupt controller, this is
+    the physical interrupt number. When there are multiple interrupt
+    controllers, or a large and sparse interrupt ID space, the mapping from
+    this type to the physical interrupt is defined by the KSP.
+*/
+
+typedef okl4_count_t okl4_interrupt_number_t;
+
+/*lint -esym(621, OKL4_INTERRUPT_INVALID_IRQ) */
+#define OKL4_INTERRUPT_INVALID_IRQ ((okl4_interrupt_number_t)(1023U))
+
+/*lint -esym(621, OKL4_INVALID_VIRQ) */
+#define OKL4_INVALID_VIRQ ((okl4_interrupt_number_t)(1023U))
+
+
+
+
+typedef okl4_interrupt_number_t okl4_irq_t;
+
+
+
+
+/**
+
+*/
+
+struct okl4_axon_data {
+    okl4_kcap_t kcap;
+    okl4_kcap_t segment;
+    okl4_irq_t virq;
+};
+
+
+
+
+/**
+    The `okl4_psize_t` type represents an unsigned integer value which is large
+    enough to represent the size of any physical memory object.
+*/
+
+typedef okl4_register_t okl4_psize_t;
+
+
+
+
+/**
+    The `okl4_lsize_t` type represents an unsigned integer value which is large
+    enough to represent the size of any guest logical memory object.
+*/
+
+typedef okl4_psize_t okl4_lsize_t;
+
+/*lint -esym(621, OKL4_DEFAULT_PAGESIZE) */
+#define OKL4_DEFAULT_PAGESIZE ((okl4_lsize_t)(4096U))
+
+
+
+/**
+    The `okl4_laddr_t` type represents an unsigned integer value which is large
+    enough to contain a guest logical address; that is, an address in the
+    input address space of the guest's virtual MMU. This may be larger than
+    the machine's pointer type.
+*/
+
+typedef okl4_lsize_t okl4_laddr_t;
+
+/*lint -esym(621, OKL4_USER_AREA_END) */
+#define OKL4_USER_AREA_END ((okl4_laddr_t)(17592186044416U))
+
+
+
+/**
+    - BIT 0 -   @ref OKL4_MASK_PENDING_AXON_DATA_INFO
+    - BIT 1 -   @ref OKL4_MASK_FAILURE_AXON_DATA_INFO
+    - BIT 2 -   @ref OKL4_MASK_USR_AXON_DATA_INFO
+    - BITS 63..3 -   @ref OKL4_MASK_LADDR_AXON_DATA_INFO
+*/
+
+/*lint -esym(621, okl4_axon_data_info_t) */
+typedef okl4_laddr_t okl4_axon_data_info_t;
+
+/*lint -esym(621, okl4_axon_data_info_getpending) */
+/*lint -esym(714, okl4_axon_data_info_getpending) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_axon_data_info_getpending(const okl4_axon_data_info_t *x);
+
+/*lint -esym(621, okl4_axon_data_info_setpending) */
+OKL4_FORCE_INLINE void
+okl4_axon_data_info_setpending(okl4_axon_data_info_t *x, okl4_bool_t _pending);
+
+/*lint -esym(621, okl4_axon_data_info_getfailure) */
+/*lint -esym(714, okl4_axon_data_info_getfailure) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_axon_data_info_getfailure(const okl4_axon_data_info_t *x);
+
+/*lint -esym(621, okl4_axon_data_info_setfailure) */
+OKL4_FORCE_INLINE void
+okl4_axon_data_info_setfailure(okl4_axon_data_info_t *x, okl4_bool_t _failure);
+
+/*lint -esym(621, okl4_axon_data_info_getusr) */
+/*lint -esym(714, okl4_axon_data_info_getusr) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_axon_data_info_getusr(const okl4_axon_data_info_t *x);
+
+/*lint -esym(621, okl4_axon_data_info_setusr) */
+OKL4_FORCE_INLINE void
+okl4_axon_data_info_setusr(okl4_axon_data_info_t *x, okl4_bool_t _usr);
+
+/*lint -esym(621, okl4_axon_data_info_getladdr) */
+/*lint -esym(714, okl4_axon_data_info_getladdr) */
+OKL4_FORCE_INLINE okl4_laddr_t
+okl4_axon_data_info_getladdr(const okl4_axon_data_info_t *x);
+
+/*lint -esym(621, okl4_axon_data_info_setladdr) */
+OKL4_FORCE_INLINE void
+okl4_axon_data_info_setladdr(okl4_axon_data_info_t *x, okl4_laddr_t _laddr);
+
+/*lint -esym(714, okl4_axon_data_info_init) */
+OKL4_FORCE_INLINE void
+okl4_axon_data_info_init(okl4_axon_data_info_t *x);
+
+/*lint -esym(714, okl4_axon_data_info_cast) */
+OKL4_FORCE_INLINE okl4_axon_data_info_t
+okl4_axon_data_info_cast(uint64_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_AXON_DATA_INFO_PENDING_MASK) */
+#define OKL4_AXON_DATA_INFO_PENDING_MASK ((okl4_axon_data_info_t)1U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_PENDING_AXON_DATA_INFO) */
+#define OKL4_MASK_PENDING_AXON_DATA_INFO ((okl4_axon_data_info_t)1U)
+/*lint -esym(621, OKL4_SHIFT_PENDING_AXON_DATA_INFO) */
+#define OKL4_SHIFT_PENDING_AXON_DATA_INFO (0)
+/*lint -esym(621, OKL4_WIDTH_PENDING_AXON_DATA_INFO) */
+#define OKL4_WIDTH_PENDING_AXON_DATA_INFO (1)
+/*lint -esym(621, OKL4_AXON_DATA_INFO_FAILURE_MASK) */
+#define OKL4_AXON_DATA_INFO_FAILURE_MASK ((okl4_axon_data_info_t)1U << 1) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_FAILURE_AXON_DATA_INFO) */
+#define OKL4_MASK_FAILURE_AXON_DATA_INFO ((okl4_axon_data_info_t)1U << 1)
+/*lint -esym(621, OKL4_SHIFT_FAILURE_AXON_DATA_INFO) */
+#define OKL4_SHIFT_FAILURE_AXON_DATA_INFO (1)
+/*lint -esym(621, OKL4_WIDTH_FAILURE_AXON_DATA_INFO) */
+#define OKL4_WIDTH_FAILURE_AXON_DATA_INFO (1)
+/*lint -esym(621, OKL4_AXON_DATA_INFO_USR_MASK) */
+#define OKL4_AXON_DATA_INFO_USR_MASK ((okl4_axon_data_info_t)1U << 2) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_USR_AXON_DATA_INFO) */
+#define OKL4_MASK_USR_AXON_DATA_INFO ((okl4_axon_data_info_t)1U << 2)
+/*lint -esym(621, OKL4_SHIFT_USR_AXON_DATA_INFO) */
+#define OKL4_SHIFT_USR_AXON_DATA_INFO (2)
+/*lint -esym(621, OKL4_WIDTH_USR_AXON_DATA_INFO) */
+#define OKL4_WIDTH_USR_AXON_DATA_INFO (1)
+/*lint -esym(621, OKL4_AXON_DATA_INFO_LADDR_MASK) */
+#define OKL4_AXON_DATA_INFO_LADDR_MASK ((okl4_axon_data_info_t)2305843009213693951U << 3) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_LADDR_AXON_DATA_INFO) */
+#define OKL4_MASK_LADDR_AXON_DATA_INFO ((okl4_axon_data_info_t)2305843009213693951U << 3)
+/*lint -esym(621, OKL4_SHIFT_LADDR_AXON_DATA_INFO) */
+#define OKL4_SHIFT_LADDR_AXON_DATA_INFO (3)
+/*lint -esym(621, OKL4_PRESHIFT_LADDR_AXON_DATA_INFO) */
+#define OKL4_PRESHIFT_LADDR_AXON_DATA_INFO (3)
+/*lint -esym(621, OKL4_WIDTH_LADDR_AXON_DATA_INFO) */
+#define OKL4_WIDTH_LADDR_AXON_DATA_INFO (61)
+
+
+/*lint -sem(okl4_axon_data_info_getpending, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_axon_data_info_getpending) */
+/*lint -esym(714, okl4_axon_data_info_getpending) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_axon_data_info_getpending(const okl4_axon_data_info_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            _Bool field : 1;
+        } bits;
+        okl4_axon_data_info_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_axon_data_info_setpending, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_axon_data_info_setpending) */
+
+/*lint -esym(621, okl4_axon_data_info_setpending) */
+OKL4_FORCE_INLINE void
+okl4_axon_data_info_setpending(okl4_axon_data_info_t *x, okl4_bool_t _pending)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            _Bool field : 1;
+        } bits;
+        okl4_axon_data_info_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_pending;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_axon_data_info_getfailure, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_axon_data_info_getfailure) */
+/*lint -esym(714, okl4_axon_data_info_getfailure) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_axon_data_info_getfailure(const okl4_axon_data_info_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 1;
+            _Bool field : 1;
+        } bits;
+        okl4_axon_data_info_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_axon_data_info_setfailure, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_axon_data_info_setfailure) */
+
+/*lint -esym(621, okl4_axon_data_info_setfailure) */
+OKL4_FORCE_INLINE void
+okl4_axon_data_info_setfailure(okl4_axon_data_info_t *x, okl4_bool_t _failure)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 1;
+            _Bool field : 1;
+        } bits;
+        okl4_axon_data_info_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_failure;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_axon_data_info_getusr, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_axon_data_info_getusr) */
+/*lint -esym(714, okl4_axon_data_info_getusr) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_axon_data_info_getusr(const okl4_axon_data_info_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 2;
+            _Bool field : 1;
+        } bits;
+        okl4_axon_data_info_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_axon_data_info_setusr, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_axon_data_info_setusr) */
+
+/*lint -esym(621, okl4_axon_data_info_setusr) */
+OKL4_FORCE_INLINE void
+okl4_axon_data_info_setusr(okl4_axon_data_info_t *x, okl4_bool_t _usr)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 2;
+            _Bool field : 1;
+        } bits;
+        okl4_axon_data_info_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_usr;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_axon_data_info_getladdr, 1p) */
+/*lint -esym(621, okl4_axon_data_info_getladdr) */
+/*lint -esym(714, okl4_axon_data_info_getladdr) */
+OKL4_FORCE_INLINE okl4_laddr_t
+okl4_axon_data_info_getladdr(const okl4_axon_data_info_t *x)
+{
+    okl4_laddr_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 3;
+            uint64_t field : 61;
+        } bits;
+        okl4_axon_data_info_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_laddr_t)_conv.bits.field;
+    return (okl4_laddr_t)(field << 3);
+}
+
+/*lint -esym(714, okl4_axon_data_info_setladdr) */
+
+/*lint -esym(621, okl4_axon_data_info_setladdr) */
+OKL4_FORCE_INLINE void
+okl4_axon_data_info_setladdr(okl4_axon_data_info_t *x, okl4_laddr_t _laddr)
+{
+    okl4_laddr_t val = _laddr >> 3;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 3;
+            uint64_t field : 61;
+        } bits;
+        okl4_axon_data_info_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint64_t)val;
+    *x = _conv.raw;
+}
+/*lint -esym(714, okl4_axon_data_info_init) */
+OKL4_FORCE_INLINE void
+okl4_axon_data_info_init(okl4_axon_data_info_t *x)
+{
+    *x = (okl4_axon_data_info_t)0U;
+}
+
+/*lint -esym(714, okl4_axon_data_info_cast) */
+OKL4_FORCE_INLINE okl4_axon_data_info_t
+okl4_axon_data_info_cast(uint64_t p, okl4_bool_t force)
+{
+    okl4_axon_data_info_t x = (okl4_axon_data_info_t)p;
+    (void)force;
+    return x;
+}
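+
+/*
+ * Illustrative sketch, not part of the generated interface: packing a buffer
+ * descriptor into an okl4_axon_data_info_t word using the accessors above.
+ * Note that setladdr() stores only bits 63..3 of the address, so the buffer
+ * is assumed to be at least 8-byte aligned; getladdr() would return the
+ * address rounded down to 8 bytes. The example_make_axon_data_info name is
+ * hypothetical.
+ */
+OKL4_FORCE_INLINE okl4_axon_data_info_t
+example_make_axon_data_info(okl4_laddr_t buffer, okl4_bool_t pending)
+{
+    okl4_axon_data_info_t info;
+
+    okl4_axon_data_info_init(&info);
+    okl4_axon_data_info_setladdr(&info, buffer);    /* bits 63..3 */
+    okl4_axon_data_info_setpending(&info, pending); /* bit 0 */
+    return info;
+}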
+
+
+
+/**
+
+*/
+
+struct okl4_axon_ep_data {
+    struct okl4_axon_data rx;
+    struct okl4_axon_data tx;
+};
+
+
+
+
+
+
+
+
+
+typedef char _okl4_padding_t;
+
+
+
+
+
+struct okl4_axon_queue {
+    uint32_t queue_offset;
+    uint16_t entries;
+    volatile uint16_t kptr;
+    volatile uint16_t uptr;
+    _okl4_padding_t __padding0_2; /**< Padding 4 */
+    _okl4_padding_t __padding1_3; /**< Padding 4 */
+};
+
+
+
+
+
+
+/**
+    The `okl4_ksize_t` type represents an unsigned integer value which is large
+    enough to represent the size of any kernel-accessible memory object.
+*/
+
+typedef okl4_lsize_t okl4_ksize_t;
+
+
+
+
+
+struct okl4_axon_queue_entry {
+    okl4_axon_data_info_t info;
+    okl4_ksize_t data_size;
+    uint32_t recv_sequence;
+    _okl4_padding_t __padding0_4; /**< Padding 8 */
+    _okl4_padding_t __padding1_5; /**< Padding 8 */
+    _okl4_padding_t __padding2_6; /**< Padding 8 */
+    _okl4_padding_t __padding3_7; /**< Padding 8 */
+};
+
+
+
+
+
+
+/**
+    - BITS 4..0 -   @ref OKL4_MASK_ALLOC_ORDER_AXON_QUEUE_SIZE
+    - BITS 12..8 -   @ref OKL4_MASK_MIN_ORDER_AXON_QUEUE_SIZE
+*/
+
+/*lint -esym(621, okl4_axon_queue_size_t) */
+typedef uint16_t okl4_axon_queue_size_t;
+
+/*lint -esym(621, okl4_axon_queue_size_getallocorder) */
+/*lint -esym(714, okl4_axon_queue_size_getallocorder) */
+OKL4_FORCE_INLINE okl4_count_t
+okl4_axon_queue_size_getallocorder(const okl4_axon_queue_size_t *x);
+
+/*lint -esym(621, okl4_axon_queue_size_setallocorder) */
+OKL4_FORCE_INLINE void
+okl4_axon_queue_size_setallocorder(okl4_axon_queue_size_t *x, okl4_count_t _alloc_order);
+
+/*lint -esym(621, okl4_axon_queue_size_getminorder) */
+/*lint -esym(714, okl4_axon_queue_size_getminorder) */
+OKL4_FORCE_INLINE okl4_count_t
+okl4_axon_queue_size_getminorder(const okl4_axon_queue_size_t *x);
+
+/*lint -esym(621, okl4_axon_queue_size_setminorder) */
+OKL4_FORCE_INLINE void
+okl4_axon_queue_size_setminorder(okl4_axon_queue_size_t *x, okl4_count_t _min_order);
+
+/*lint -esym(714, okl4_axon_queue_size_init) */
+OKL4_FORCE_INLINE void
+okl4_axon_queue_size_init(okl4_axon_queue_size_t *x);
+
+/*lint -esym(714, okl4_axon_queue_size_cast) */
+OKL4_FORCE_INLINE okl4_axon_queue_size_t
+okl4_axon_queue_size_cast(uint16_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_AXON_QUEUE_SIZE_ALLOC_ORDER_MASK) */
+#define OKL4_AXON_QUEUE_SIZE_ALLOC_ORDER_MASK (okl4_axon_queue_size_t)(31U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_ALLOC_ORDER_AXON_QUEUE_SIZE) */
+#define OKL4_MASK_ALLOC_ORDER_AXON_QUEUE_SIZE (okl4_axon_queue_size_t)(31U)
+/*lint -esym(621, OKL4_SHIFT_ALLOC_ORDER_AXON_QUEUE_SIZE) */
+#define OKL4_SHIFT_ALLOC_ORDER_AXON_QUEUE_SIZE (0)
+/*lint -esym(621, OKL4_WIDTH_ALLOC_ORDER_AXON_QUEUE_SIZE) */
+#define OKL4_WIDTH_ALLOC_ORDER_AXON_QUEUE_SIZE (5)
+/*lint -esym(621, OKL4_AXON_QUEUE_SIZE_MIN_ORDER_MASK) */
+#define OKL4_AXON_QUEUE_SIZE_MIN_ORDER_MASK (okl4_axon_queue_size_t)(31U << 8) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_MIN_ORDER_AXON_QUEUE_SIZE) */
+#define OKL4_MASK_MIN_ORDER_AXON_QUEUE_SIZE (okl4_axon_queue_size_t)(31U << 8)
+/*lint -esym(621, OKL4_SHIFT_MIN_ORDER_AXON_QUEUE_SIZE) */
+#define OKL4_SHIFT_MIN_ORDER_AXON_QUEUE_SIZE (8)
+/*lint -esym(621, OKL4_WIDTH_MIN_ORDER_AXON_QUEUE_SIZE) */
+#define OKL4_WIDTH_MIN_ORDER_AXON_QUEUE_SIZE (5)
+
+
+/*lint -sem(okl4_axon_queue_size_getallocorder, 1p, @n >= 0 && @n <= 31) */
+/*lint -esym(621, okl4_axon_queue_size_getallocorder) */
+/*lint -esym(714, okl4_axon_queue_size_getallocorder) */
+OKL4_FORCE_INLINE okl4_count_t
+okl4_axon_queue_size_getallocorder(const okl4_axon_queue_size_t *x)
+{
+    okl4_count_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 5;
+        } bits;
+        okl4_axon_queue_size_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_count_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_axon_queue_size_setallocorder, 2n >= 0 && 2n <= 31) */
+/*lint -esym(714, okl4_axon_queue_size_setallocorder) */
+
+/*lint -esym(621, okl4_axon_queue_size_setallocorder) */
+OKL4_FORCE_INLINE void
+okl4_axon_queue_size_setallocorder(okl4_axon_queue_size_t *x, okl4_count_t _alloc_order)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 5;
+        } bits;
+        okl4_axon_queue_size_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_alloc_order;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_axon_queue_size_getminorder, 1p, @n >= 0 && @n <= 31) */
+/*lint -esym(621, okl4_axon_queue_size_getminorder) */
+/*lint -esym(714, okl4_axon_queue_size_getminorder) */
+OKL4_FORCE_INLINE okl4_count_t
+okl4_axon_queue_size_getminorder(const okl4_axon_queue_size_t *x)
+{
+    okl4_count_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 8;
+            uint32_t field : 5;
+        } bits;
+        okl4_axon_queue_size_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_count_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_axon_queue_size_setminorder, 2n >= 0 && 2n <= 31) */
+/*lint -esym(714, okl4_axon_queue_size_setminorder) */
+
+/*lint -esym(621, okl4_axon_queue_size_setminorder) */
+OKL4_FORCE_INLINE void
+okl4_axon_queue_size_setminorder(okl4_axon_queue_size_t *x, okl4_count_t _min_order)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 8;
+            uint32_t field : 5;
+        } bits;
+        okl4_axon_queue_size_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_min_order;
+    *x = _conv.raw;
+}
+/*lint -esym(714, okl4_axon_queue_size_init) */
+OKL4_FORCE_INLINE void
+okl4_axon_queue_size_init(okl4_axon_queue_size_t *x)
+{
+    *x = (okl4_axon_queue_size_t)0U;
+}
+
+/*lint -esym(714, okl4_axon_queue_size_cast) */
+OKL4_FORCE_INLINE okl4_axon_queue_size_t
+okl4_axon_queue_size_cast(uint16_t p, okl4_bool_t force)
+{
+    okl4_axon_queue_size_t x = (okl4_axon_queue_size_t)p;
+    (void)force;
+    return x;
+}
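+
+/*
+ * Illustrative sketch, not part of the generated interface: encoding a queue
+ * size with an allocation order of 12 and a minimum order of 6, assuming the
+ * two 5-bit fields are log2 buffer sizes (4KiB and 64 bytes respectively).
+ * The example_axon_queue_size_4k name and the chosen orders are
+ * hypothetical.
+ */
+OKL4_FORCE_INLINE okl4_axon_queue_size_t
+example_axon_queue_size_4k(void)
+{
+    okl4_axon_queue_size_t size;
+
+    okl4_axon_queue_size_init(&size);
+    okl4_axon_queue_size_setallocorder(&size, 12U); /* bits 4..0 */
+    okl4_axon_queue_size_setminorder(&size, 6U);    /* bits 12..8 */
+    return size;
+}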
+
+
+
+
+struct okl4_axon_rx {
+    struct okl4_axon_queue queues[4];
+    okl4_axon_queue_size_t queue_sizes[4];
+};
+
+
+
+
+
+
+
+struct okl4_axon_tx {
+    struct okl4_axon_queue queues[4];
+};
+
+
+
+
+
+
+
+typedef okl4_register_t okl4_virq_flags_t;
+
+
+
+
+/**
+    - BIT 0 -   @ref OKL4_MASK_READY_AXON_VIRQ_FLAGS
+    - BIT 1 -   @ref OKL4_MASK_FAULT_AXON_VIRQ_FLAGS
+*/
+
+/*lint -esym(621, okl4_axon_virq_flags_t) */
+typedef okl4_virq_flags_t okl4_axon_virq_flags_t;
+
+/*lint -esym(621, okl4_axon_virq_flags_getready) */
+/*lint -esym(714, okl4_axon_virq_flags_getready) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_axon_virq_flags_getready(const okl4_axon_virq_flags_t *x);
+
+/*lint -esym(621, okl4_axon_virq_flags_setready) */
+OKL4_FORCE_INLINE void
+okl4_axon_virq_flags_setready(okl4_axon_virq_flags_t *x, okl4_bool_t _ready);
+
+/*lint -esym(621, okl4_axon_virq_flags_getfault) */
+/*lint -esym(714, okl4_axon_virq_flags_getfault) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_axon_virq_flags_getfault(const okl4_axon_virq_flags_t *x);
+
+/*lint -esym(621, okl4_axon_virq_flags_setfault) */
+OKL4_FORCE_INLINE void
+okl4_axon_virq_flags_setfault(okl4_axon_virq_flags_t *x, okl4_bool_t _fault);
+
+/*lint -esym(714, okl4_axon_virq_flags_init) */
+OKL4_FORCE_INLINE void
+okl4_axon_virq_flags_init(okl4_axon_virq_flags_t *x);
+
+/*lint -esym(714, okl4_axon_virq_flags_cast) */
+OKL4_FORCE_INLINE okl4_axon_virq_flags_t
+okl4_axon_virq_flags_cast(uint64_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_AXON_VIRQ_FLAGS_READY_MASK) */
+#define OKL4_AXON_VIRQ_FLAGS_READY_MASK ((okl4_axon_virq_flags_t)1U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_READY_AXON_VIRQ_FLAGS) */
+#define OKL4_MASK_READY_AXON_VIRQ_FLAGS ((okl4_axon_virq_flags_t)1U)
+/*lint -esym(621, OKL4_SHIFT_READY_AXON_VIRQ_FLAGS) */
+#define OKL4_SHIFT_READY_AXON_VIRQ_FLAGS (0)
+/*lint -esym(621, OKL4_WIDTH_READY_AXON_VIRQ_FLAGS) */
+#define OKL4_WIDTH_READY_AXON_VIRQ_FLAGS (1)
+/*lint -esym(621, OKL4_AXON_VIRQ_FLAGS_FAULT_MASK) */
+#define OKL4_AXON_VIRQ_FLAGS_FAULT_MASK ((okl4_axon_virq_flags_t)1U << 1) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_FAULT_AXON_VIRQ_FLAGS) */
+#define OKL4_MASK_FAULT_AXON_VIRQ_FLAGS ((okl4_axon_virq_flags_t)1U << 1)
+/*lint -esym(621, OKL4_SHIFT_FAULT_AXON_VIRQ_FLAGS) */
+#define OKL4_SHIFT_FAULT_AXON_VIRQ_FLAGS (1)
+/*lint -esym(621, OKL4_WIDTH_FAULT_AXON_VIRQ_FLAGS) */
+#define OKL4_WIDTH_FAULT_AXON_VIRQ_FLAGS (1)
+
+
+/*lint -sem(okl4_axon_virq_flags_getready, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_axon_virq_flags_getready) */
+/*lint -esym(714, okl4_axon_virq_flags_getready) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_axon_virq_flags_getready(const okl4_axon_virq_flags_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            _Bool field : 1;
+        } bits;
+        okl4_axon_virq_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_axon_virq_flags_setready, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_axon_virq_flags_setready) */
+
+/*lint -esym(621, okl4_axon_virq_flags_setready) */
+OKL4_FORCE_INLINE void
+okl4_axon_virq_flags_setready(okl4_axon_virq_flags_t *x, okl4_bool_t _ready)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            _Bool field : 1;
+        } bits;
+        okl4_axon_virq_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_ready;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_axon_virq_flags_getfault, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_axon_virq_flags_getfault) */
+/*lint -esym(714, okl4_axon_virq_flags_getfault) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_axon_virq_flags_getfault(const okl4_axon_virq_flags_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 1;
+            _Bool field : 1;
+        } bits;
+        okl4_axon_virq_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_axon_virq_flags_setfault, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_axon_virq_flags_setfault) */
+
+/*lint -esym(621, okl4_axon_virq_flags_setfault) */
+OKL4_FORCE_INLINE void
+okl4_axon_virq_flags_setfault(okl4_axon_virq_flags_t *x, okl4_bool_t _fault)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 1;
+            _Bool field : 1;
+        } bits;
+        okl4_axon_virq_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_fault;
+    *x = _conv.raw;
+}
+/*lint -esym(714, okl4_axon_virq_flags_init) */
+OKL4_FORCE_INLINE void
+okl4_axon_virq_flags_init(okl4_axon_virq_flags_t *x)
+{
+    *x = (okl4_axon_virq_flags_t)0U;
+}
+
+/*lint -esym(714, okl4_axon_virq_flags_cast) */
+OKL4_FORCE_INLINE okl4_axon_virq_flags_t
+okl4_axon_virq_flags_cast(uint64_t p, okl4_bool_t force)
+{
+    okl4_axon_virq_flags_t x = (okl4_axon_virq_flags_t)p;
+    (void)force;
+    return x;
+}
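+
+/*
+ * Illustrative sketch, not part of the generated interface: decoding an Axon
+ * VIRQ payload into its ready/fault flags, assuming the payload is delivered
+ * as a raw okl4_virq_flags_t word. The example_axon_virq_needs_attention
+ * name is hypothetical.
+ */
+OKL4_FORCE_INLINE okl4_bool_t
+example_axon_virq_needs_attention(okl4_virq_flags_t payload)
+{
+    okl4_axon_virq_flags_t flags =
+            okl4_axon_virq_flags_cast(payload, (okl4_bool_t)0U);
+
+    /* Either new data is ready or the channel has faulted. */
+    return (okl4_bool_t)(okl4_axon_virq_flags_getready(&flags) ||
+                         okl4_axon_virq_flags_getfault(&flags));
+}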
+
+
+
+/**
+    The `okl4_page_cache_t` object represents a set of attributes that
+    controls the caching behaviour of memory page mappings.
+
+    - @ref OKL4_PAGE_CACHE_WRITECOMBINE
+    - @ref OKL4_PAGE_CACHE_DEFAULT
+    - @ref OKL4_PAGE_CACHE_IPC_RX
+    - @ref OKL4_PAGE_CACHE_IPC_TX
+    - @ref OKL4_PAGE_CACHE_TRACEBUFFER
+    - @ref OKL4_PAGE_CACHE_WRITEBACK
+    - @ref OKL4_PAGE_CACHE_IWB_RWA_ONC
+    - @ref OKL4_PAGE_CACHE_WRITETHROUGH
+    - @ref OKL4_PAGE_CACHE_DEVICE_GRE
+    - @ref OKL4_PAGE_CACHE_DEVICE_NGRE
+    - @ref OKL4_PAGE_CACHE_DEVICE
+    - @ref OKL4_PAGE_CACHE_STRONG
+    - @ref OKL4_PAGE_CACHE_HW_DEVICE_NGNRNE
+    - @ref OKL4_PAGE_CACHE_HW_MASK
+    - @ref OKL4_PAGE_CACHE_HW_DEVICE_NGNRE
+    - @ref OKL4_PAGE_CACHE_HW_DEVICE_NGRE
+    - @ref OKL4_PAGE_CACHE_HW_DEVICE_GRE
+    - @ref OKL4_PAGE_CACHE_HW_TWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_TWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_TWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_ONC_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_ONC_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_ONC_NSH
+    - @ref OKL4_PAGE_CACHE_HW_NC_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_ONC_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_ONC_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_ONC_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_ONC_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_ONC_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_ONC_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_ONC_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_ONC_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_ONC_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_ONC_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_ONC_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_TWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_TWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_TWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWT_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_WT_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWT_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWT_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWT_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWT_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWT_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_WT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_WT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_WT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWB_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWB_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWB_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWB_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_WB_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWB_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWB_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_WB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_WB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_WB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_TWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_TWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_TWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_ONC_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_ONC_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_ONC_OSH
+    - @ref OKL4_PAGE_CACHE_HW_NC_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_ONC_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_ONC_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_ONC_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_ONC_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_ONC_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_ONC_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_ONC_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_ONC_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_ONC_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_ONC_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_ONC_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_TWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_TWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_TWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWT_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_WT_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWT_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWT_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWT_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWT_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWT_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_WT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_WT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_WT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWB_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWB_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWB_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWB_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_WB_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWB_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWB_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_WB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_WB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_WB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_TWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_TWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_TWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_ONC_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_ONC_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_ONC_ISH
+    - @ref OKL4_PAGE_CACHE_HW_NC_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_ONC_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_ONC_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_ONC_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_ONC_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_ONC_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_ONC_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_ONC_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_ONC_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_ONC_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_ONC_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_ONC_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_TWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_TWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_TWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWT_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_WT_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWT_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWT_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWT_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWT_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWT_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_WT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_WT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_WT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWB_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWB_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWB_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWB_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_WB_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWB_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWB_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_WB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_WB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_WB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_MAX
+    - @ref OKL4_PAGE_CACHE_INVALID
+*/
+
+typedef okl4_count_t okl4_page_cache_t;
+
+/*lint -esym(621, OKL4_PAGE_CACHE_WRITECOMBINE) */
+#define OKL4_PAGE_CACHE_WRITECOMBINE ((okl4_page_cache_t)0x0U)
+/*lint -esym(621, OKL4_PAGE_CACHE_DEFAULT) */
+#define OKL4_PAGE_CACHE_DEFAULT ((okl4_page_cache_t)0x1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_IPC_RX) */
+#define OKL4_PAGE_CACHE_IPC_RX ((okl4_page_cache_t)0x1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_IPC_TX) */
+#define OKL4_PAGE_CACHE_IPC_TX ((okl4_page_cache_t)0x1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_TRACEBUFFER) */
+#define OKL4_PAGE_CACHE_TRACEBUFFER ((okl4_page_cache_t)0x1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_WRITEBACK) */
+#define OKL4_PAGE_CACHE_WRITEBACK ((okl4_page_cache_t)0x1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_IWB_RWA_ONC) */
+#define OKL4_PAGE_CACHE_IWB_RWA_ONC ((okl4_page_cache_t)0x2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_WRITETHROUGH) */
+#define OKL4_PAGE_CACHE_WRITETHROUGH ((okl4_page_cache_t)0x3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_DEVICE_GRE) */
+#define OKL4_PAGE_CACHE_DEVICE_GRE ((okl4_page_cache_t)0x4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_DEVICE_NGRE) */
+#define OKL4_PAGE_CACHE_DEVICE_NGRE ((okl4_page_cache_t)0x5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_DEVICE) */
+#define OKL4_PAGE_CACHE_DEVICE ((okl4_page_cache_t)0x6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_STRONG) */
+#define OKL4_PAGE_CACHE_STRONG ((okl4_page_cache_t)0x7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_DEVICE_NGNRNE) */
+#define OKL4_PAGE_CACHE_HW_DEVICE_NGNRNE ((okl4_page_cache_t)0x8000000U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_MASK) */
+#define OKL4_PAGE_CACHE_HW_MASK ((okl4_page_cache_t)0x8000000U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_DEVICE_NGNRE) */
+#define OKL4_PAGE_CACHE_HW_DEVICE_NGNRE ((okl4_page_cache_t)0x8000004U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_DEVICE_NGRE) */
+#define OKL4_PAGE_CACHE_HW_DEVICE_NGRE ((okl4_page_cache_t)0x8000008U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_DEVICE_GRE) */
+#define OKL4_PAGE_CACHE_HW_DEVICE_GRE ((okl4_page_cache_t)0x800000cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_TWT_WA_NSH ((okl4_page_cache_t)0x8000011U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_NSH ((okl4_page_cache_t)0x8000012U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_NSH ((okl4_page_cache_t)0x8000013U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWT_WA_NSH ((okl4_page_cache_t)0x8000014U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_NSH ((okl4_page_cache_t)0x8000015U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_NSH ((okl4_page_cache_t)0x8000016U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_NSH ((okl4_page_cache_t)0x8000017U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_WA_NSH ((okl4_page_cache_t)0x8000018U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_WA_NSH ((okl4_page_cache_t)0x8000019U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_WA_NSH ((okl4_page_cache_t)0x800001aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_NSH ((okl4_page_cache_t)0x800001bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_WA_NSH ((okl4_page_cache_t)0x800001cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_WA_NSH ((okl4_page_cache_t)0x800001dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_WA_NSH ((okl4_page_cache_t)0x800001eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_NSH ((okl4_page_cache_t)0x800001fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_NSH ((okl4_page_cache_t)0x8000021U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_TWT_RA_NSH ((okl4_page_cache_t)0x8000022U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_NSH ((okl4_page_cache_t)0x8000023U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWT_RA_NSH ((okl4_page_cache_t)0x8000024U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_NSH ((okl4_page_cache_t)0x8000025U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_NSH ((okl4_page_cache_t)0x8000026U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_NSH ((okl4_page_cache_t)0x8000027U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RA_NSH ((okl4_page_cache_t)0x8000028U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RA_NSH ((okl4_page_cache_t)0x8000029U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RA_NSH ((okl4_page_cache_t)0x800002aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_NSH ((okl4_page_cache_t)0x800002bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RA_NSH ((okl4_page_cache_t)0x800002cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RA_NSH ((okl4_page_cache_t)0x800002dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RA_NSH ((okl4_page_cache_t)0x800002eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_NSH ((okl4_page_cache_t)0x800002fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_NSH ((okl4_page_cache_t)0x8000031U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_NSH ((okl4_page_cache_t)0x8000032U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_TWT_RWA_NSH ((okl4_page_cache_t)0x8000033U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWT_RWA_NSH ((okl4_page_cache_t)0x8000034U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_NSH ((okl4_page_cache_t)0x8000035U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_NSH ((okl4_page_cache_t)0x8000036U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_NSH ((okl4_page_cache_t)0x8000037U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_NSH ((okl4_page_cache_t)0x8000038U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_NSH ((okl4_page_cache_t)0x8000039U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_NSH ((okl4_page_cache_t)0x800003aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_NSH ((okl4_page_cache_t)0x800003bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_NSH ((okl4_page_cache_t)0x800003cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_NSH ((okl4_page_cache_t)0x800003dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_NSH ((okl4_page_cache_t)0x800003eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_NSH ((okl4_page_cache_t)0x800003fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_ONC_NSH ((okl4_page_cache_t)0x8000041U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_ONC_NSH ((okl4_page_cache_t)0x8000042U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_ONC_NSH ((okl4_page_cache_t)0x8000043U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_NC_NSH) */
+#define OKL4_PAGE_CACHE_HW_NC_NSH ((okl4_page_cache_t)0x8000044U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_ONC_NSH ((okl4_page_cache_t)0x8000045U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_ONC_NSH ((okl4_page_cache_t)0x8000046U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_ONC_NSH ((okl4_page_cache_t)0x8000047U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_ONC_NSH ((okl4_page_cache_t)0x8000048U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_ONC_NSH ((okl4_page_cache_t)0x8000049U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_ONC_NSH ((okl4_page_cache_t)0x800004aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_ONC_NSH ((okl4_page_cache_t)0x800004bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_ONC_NSH ((okl4_page_cache_t)0x800004cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_ONC_NSH ((okl4_page_cache_t)0x800004dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_ONC_NSH ((okl4_page_cache_t)0x800004eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_ONC_NSH ((okl4_page_cache_t)0x800004fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_NSH ((okl4_page_cache_t)0x8000051U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_NSH ((okl4_page_cache_t)0x8000052U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_NSH ((okl4_page_cache_t)0x8000053U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWB_WA_NSH ((okl4_page_cache_t)0x8000054U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_TWB_WA_NSH ((okl4_page_cache_t)0x8000055U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_NSH ((okl4_page_cache_t)0x8000056U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_NSH ((okl4_page_cache_t)0x8000057U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_WA_NSH ((okl4_page_cache_t)0x8000058U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_WA_NSH ((okl4_page_cache_t)0x8000059U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_WA_NSH ((okl4_page_cache_t)0x800005aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_NSH ((okl4_page_cache_t)0x800005bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_WA_NSH ((okl4_page_cache_t)0x800005cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_WA_NSH ((okl4_page_cache_t)0x800005dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_WA_NSH ((okl4_page_cache_t)0x800005eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_NSH ((okl4_page_cache_t)0x800005fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_NSH ((okl4_page_cache_t)0x8000061U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_NSH ((okl4_page_cache_t)0x8000062U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_NSH ((okl4_page_cache_t)0x8000063U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWB_RA_NSH ((okl4_page_cache_t)0x8000064U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_NSH ((okl4_page_cache_t)0x8000065U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_TWB_RA_NSH ((okl4_page_cache_t)0x8000066U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_NSH ((okl4_page_cache_t)0x8000067U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RA_NSH ((okl4_page_cache_t)0x8000068U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RA_NSH ((okl4_page_cache_t)0x8000069U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RA_NSH ((okl4_page_cache_t)0x800006aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_NSH ((okl4_page_cache_t)0x800006bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RA_NSH ((okl4_page_cache_t)0x800006cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RA_NSH ((okl4_page_cache_t)0x800006dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RA_NSH ((okl4_page_cache_t)0x800006eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_NSH ((okl4_page_cache_t)0x800006fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_NSH ((okl4_page_cache_t)0x8000071U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_NSH ((okl4_page_cache_t)0x8000072U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_NSH ((okl4_page_cache_t)0x8000073U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWB_RWA_NSH ((okl4_page_cache_t)0x8000074U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_NSH ((okl4_page_cache_t)0x8000075U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_NSH ((okl4_page_cache_t)0x8000076U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_TWB_RWA_NSH ((okl4_page_cache_t)0x8000077U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_NSH ((okl4_page_cache_t)0x8000078U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_NSH ((okl4_page_cache_t)0x8000079U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_NSH ((okl4_page_cache_t)0x800007aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_NSH ((okl4_page_cache_t)0x800007bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_NSH ((okl4_page_cache_t)0x800007cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_NSH ((okl4_page_cache_t)0x800007dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_NSH ((okl4_page_cache_t)0x800007eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_NSH ((okl4_page_cache_t)0x800007fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_NA_NSH ((okl4_page_cache_t)0x8000081U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_NA_NSH ((okl4_page_cache_t)0x8000082U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_NSH ((okl4_page_cache_t)0x8000083U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWT_NA_NSH ((okl4_page_cache_t)0x8000084U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_NA_NSH ((okl4_page_cache_t)0x8000085U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_NA_NSH ((okl4_page_cache_t)0x8000086U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_NSH ((okl4_page_cache_t)0x8000087U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_WT_NA_NSH ((okl4_page_cache_t)0x8000088U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWT_NA_NSH ((okl4_page_cache_t)0x8000089U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWT_NA_NSH ((okl4_page_cache_t)0x800008aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_NA_NSH ((okl4_page_cache_t)0x800008bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWT_NA_NSH ((okl4_page_cache_t)0x800008cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWT_NA_NSH ((okl4_page_cache_t)0x800008dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWT_NA_NSH ((okl4_page_cache_t)0x800008eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_NA_NSH ((okl4_page_cache_t)0x800008fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_WA_NSH ((okl4_page_cache_t)0x8000091U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_WA_NSH ((okl4_page_cache_t)0x8000092U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_NSH ((okl4_page_cache_t)0x8000093U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWT_WA_NSH ((okl4_page_cache_t)0x8000094U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_WA_NSH ((okl4_page_cache_t)0x8000095U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_WA_NSH ((okl4_page_cache_t)0x8000096U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_NSH ((okl4_page_cache_t)0x8000097U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWT_WA_NSH ((okl4_page_cache_t)0x8000098U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_WT_WA_NSH ((okl4_page_cache_t)0x8000099U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWT_WA_NSH ((okl4_page_cache_t)0x800009aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_WA_NSH ((okl4_page_cache_t)0x800009bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWT_WA_NSH ((okl4_page_cache_t)0x800009cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWT_WA_NSH ((okl4_page_cache_t)0x800009dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWT_WA_NSH ((okl4_page_cache_t)0x800009eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_WA_NSH ((okl4_page_cache_t)0x800009fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RA_NSH ((okl4_page_cache_t)0x80000a1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RA_NSH ((okl4_page_cache_t)0x80000a2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_NSH ((okl4_page_cache_t)0x80000a3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWT_RA_NSH ((okl4_page_cache_t)0x80000a4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RA_NSH ((okl4_page_cache_t)0x80000a5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RA_NSH ((okl4_page_cache_t)0x80000a6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_NSH ((okl4_page_cache_t)0x80000a7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RA_NSH ((okl4_page_cache_t)0x80000a8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RA_NSH ((okl4_page_cache_t)0x80000a9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_WT_RA_NSH ((okl4_page_cache_t)0x80000aaU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_RA_NSH ((okl4_page_cache_t)0x80000abU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RA_NSH ((okl4_page_cache_t)0x80000acU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RA_NSH ((okl4_page_cache_t)0x80000adU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RA_NSH ((okl4_page_cache_t)0x80000aeU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RA_NSH ((okl4_page_cache_t)0x80000afU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000b1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000b2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000b3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWT_RWA_NSH ((okl4_page_cache_t)0x80000b4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000b5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000b6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000b7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000b8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000b9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000baU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_WT_RWA_NSH ((okl4_page_cache_t)0x80000bbU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000bcU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000bdU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000beU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000bfU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_NA_NSH ((okl4_page_cache_t)0x80000c1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_NA_NSH ((okl4_page_cache_t)0x80000c2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_NSH ((okl4_page_cache_t)0x80000c3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWB_NA_NSH ((okl4_page_cache_t)0x80000c4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_NA_NSH ((okl4_page_cache_t)0x80000c5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_NA_NSH ((okl4_page_cache_t)0x80000c6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_NSH ((okl4_page_cache_t)0x80000c7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWB_NA_NSH ((okl4_page_cache_t)0x80000c8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWB_NA_NSH ((okl4_page_cache_t)0x80000c9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWB_NA_NSH ((okl4_page_cache_t)0x80000caU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_NA_NSH ((okl4_page_cache_t)0x80000cbU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_WB_NA_NSH ((okl4_page_cache_t)0x80000ccU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWB_NA_NSH ((okl4_page_cache_t)0x80000cdU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWB_NA_NSH ((okl4_page_cache_t)0x80000ceU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_NA_NSH ((okl4_page_cache_t)0x80000cfU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_WA_NSH ((okl4_page_cache_t)0x80000d1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_WA_NSH ((okl4_page_cache_t)0x80000d2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_NSH ((okl4_page_cache_t)0x80000d3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWB_WA_NSH ((okl4_page_cache_t)0x80000d4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_WA_NSH ((okl4_page_cache_t)0x80000d5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_WA_NSH ((okl4_page_cache_t)0x80000d6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_NSH ((okl4_page_cache_t)0x80000d7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWB_WA_NSH ((okl4_page_cache_t)0x80000d8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWB_WA_NSH ((okl4_page_cache_t)0x80000d9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWB_WA_NSH ((okl4_page_cache_t)0x80000daU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_WA_NSH ((okl4_page_cache_t)0x80000dbU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWB_WA_NSH ((okl4_page_cache_t)0x80000dcU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_WB_WA_NSH ((okl4_page_cache_t)0x80000ddU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWB_WA_NSH ((okl4_page_cache_t)0x80000deU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_WA_NSH ((okl4_page_cache_t)0x80000dfU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RA_NSH ((okl4_page_cache_t)0x80000e1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RA_NSH ((okl4_page_cache_t)0x80000e2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_NSH ((okl4_page_cache_t)0x80000e3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWB_RA_NSH ((okl4_page_cache_t)0x80000e4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RA_NSH ((okl4_page_cache_t)0x80000e5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RA_NSH ((okl4_page_cache_t)0x80000e6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_NSH ((okl4_page_cache_t)0x80000e7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RA_NSH ((okl4_page_cache_t)0x80000e8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RA_NSH ((okl4_page_cache_t)0x80000e9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RA_NSH ((okl4_page_cache_t)0x80000eaU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RA_NSH ((okl4_page_cache_t)0x80000ebU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RA_NSH ((okl4_page_cache_t)0x80000ecU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RA_NSH ((okl4_page_cache_t)0x80000edU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_WB_RA_NSH ((okl4_page_cache_t)0x80000eeU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_RA_NSH ((okl4_page_cache_t)0x80000efU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000f1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000f2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000f3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWB_RWA_NSH ((okl4_page_cache_t)0x80000f4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000f5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000f6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000f7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000f8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000f9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000faU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000fbU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000fcU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000fdU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000feU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_WB_RWA_NSH ((okl4_page_cache_t)0x80000ffU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_TWT_WA_OSH ((okl4_page_cache_t)0x8000211U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_OSH ((okl4_page_cache_t)0x8000212U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_OSH ((okl4_page_cache_t)0x8000213U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWT_WA_OSH ((okl4_page_cache_t)0x8000214U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_OSH ((okl4_page_cache_t)0x8000215U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_OSH ((okl4_page_cache_t)0x8000216U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_OSH ((okl4_page_cache_t)0x8000217U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_WA_OSH ((okl4_page_cache_t)0x8000218U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_WA_OSH ((okl4_page_cache_t)0x8000219U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_WA_OSH ((okl4_page_cache_t)0x800021aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_OSH ((okl4_page_cache_t)0x800021bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_WA_OSH ((okl4_page_cache_t)0x800021cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_WA_OSH ((okl4_page_cache_t)0x800021dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_WA_OSH ((okl4_page_cache_t)0x800021eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_OSH ((okl4_page_cache_t)0x800021fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_OSH ((okl4_page_cache_t)0x8000221U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_TWT_RA_OSH ((okl4_page_cache_t)0x8000222U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_OSH ((okl4_page_cache_t)0x8000223U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWT_RA_OSH ((okl4_page_cache_t)0x8000224U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_OSH ((okl4_page_cache_t)0x8000225U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_OSH ((okl4_page_cache_t)0x8000226U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_OSH ((okl4_page_cache_t)0x8000227U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RA_OSH ((okl4_page_cache_t)0x8000228U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RA_OSH ((okl4_page_cache_t)0x8000229U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RA_OSH ((okl4_page_cache_t)0x800022aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_OSH ((okl4_page_cache_t)0x800022bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RA_OSH ((okl4_page_cache_t)0x800022cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RA_OSH ((okl4_page_cache_t)0x800022dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RA_OSH ((okl4_page_cache_t)0x800022eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_OSH ((okl4_page_cache_t)0x800022fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_OSH ((okl4_page_cache_t)0x8000231U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_OSH ((okl4_page_cache_t)0x8000232U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_TWT_RWA_OSH ((okl4_page_cache_t)0x8000233U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWT_RWA_OSH ((okl4_page_cache_t)0x8000234U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_OSH ((okl4_page_cache_t)0x8000235U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_OSH ((okl4_page_cache_t)0x8000236U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_OSH ((okl4_page_cache_t)0x8000237U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_OSH ((okl4_page_cache_t)0x8000238U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_OSH ((okl4_page_cache_t)0x8000239U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_OSH ((okl4_page_cache_t)0x800023aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_OSH ((okl4_page_cache_t)0x800023bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_OSH ((okl4_page_cache_t)0x800023cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_OSH ((okl4_page_cache_t)0x800023dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_OSH ((okl4_page_cache_t)0x800023eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_OSH ((okl4_page_cache_t)0x800023fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_ONC_OSH ((okl4_page_cache_t)0x8000241U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_ONC_OSH ((okl4_page_cache_t)0x8000242U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_ONC_OSH ((okl4_page_cache_t)0x8000243U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_NC_OSH) */
+#define OKL4_PAGE_CACHE_HW_NC_OSH ((okl4_page_cache_t)0x8000244U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_ONC_OSH ((okl4_page_cache_t)0x8000245U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_ONC_OSH ((okl4_page_cache_t)0x8000246U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_ONC_OSH ((okl4_page_cache_t)0x8000247U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_ONC_OSH ((okl4_page_cache_t)0x8000248U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_ONC_OSH ((okl4_page_cache_t)0x8000249U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_ONC_OSH ((okl4_page_cache_t)0x800024aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_ONC_OSH ((okl4_page_cache_t)0x800024bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_ONC_OSH ((okl4_page_cache_t)0x800024cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_ONC_OSH ((okl4_page_cache_t)0x800024dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_ONC_OSH ((okl4_page_cache_t)0x800024eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_ONC_OSH ((okl4_page_cache_t)0x800024fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_OSH ((okl4_page_cache_t)0x8000251U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_OSH ((okl4_page_cache_t)0x8000252U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_OSH ((okl4_page_cache_t)0x8000253U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWB_WA_OSH ((okl4_page_cache_t)0x8000254U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_TWB_WA_OSH ((okl4_page_cache_t)0x8000255U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_OSH ((okl4_page_cache_t)0x8000256U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_OSH ((okl4_page_cache_t)0x8000257U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_WA_OSH ((okl4_page_cache_t)0x8000258U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_WA_OSH ((okl4_page_cache_t)0x8000259U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_WA_OSH ((okl4_page_cache_t)0x800025aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_OSH ((okl4_page_cache_t)0x800025bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_WA_OSH ((okl4_page_cache_t)0x800025cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_WA_OSH ((okl4_page_cache_t)0x800025dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_WA_OSH ((okl4_page_cache_t)0x800025eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_OSH ((okl4_page_cache_t)0x800025fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_OSH ((okl4_page_cache_t)0x8000261U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_OSH ((okl4_page_cache_t)0x8000262U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_OSH ((okl4_page_cache_t)0x8000263U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWB_RA_OSH ((okl4_page_cache_t)0x8000264U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_OSH ((okl4_page_cache_t)0x8000265U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_TWB_RA_OSH ((okl4_page_cache_t)0x8000266U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_OSH ((okl4_page_cache_t)0x8000267U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RA_OSH ((okl4_page_cache_t)0x8000268U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RA_OSH ((okl4_page_cache_t)0x8000269U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RA_OSH ((okl4_page_cache_t)0x800026aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_OSH ((okl4_page_cache_t)0x800026bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RA_OSH ((okl4_page_cache_t)0x800026cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RA_OSH ((okl4_page_cache_t)0x800026dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RA_OSH ((okl4_page_cache_t)0x800026eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_OSH ((okl4_page_cache_t)0x800026fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_OSH ((okl4_page_cache_t)0x8000271U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_OSH ((okl4_page_cache_t)0x8000272U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_OSH ((okl4_page_cache_t)0x8000273U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWB_RWA_OSH ((okl4_page_cache_t)0x8000274U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_OSH ((okl4_page_cache_t)0x8000275U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_OSH ((okl4_page_cache_t)0x8000276U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_TWB_RWA_OSH ((okl4_page_cache_t)0x8000277U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_OSH ((okl4_page_cache_t)0x8000278U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_OSH ((okl4_page_cache_t)0x8000279U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_OSH ((okl4_page_cache_t)0x800027aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_OSH ((okl4_page_cache_t)0x800027bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_OSH ((okl4_page_cache_t)0x800027cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_OSH ((okl4_page_cache_t)0x800027dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_OSH ((okl4_page_cache_t)0x800027eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_OSH ((okl4_page_cache_t)0x800027fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_NA_OSH ((okl4_page_cache_t)0x8000281U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_NA_OSH ((okl4_page_cache_t)0x8000282U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_OSH ((okl4_page_cache_t)0x8000283U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWT_NA_OSH ((okl4_page_cache_t)0x8000284U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_NA_OSH ((okl4_page_cache_t)0x8000285U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_NA_OSH ((okl4_page_cache_t)0x8000286U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_OSH ((okl4_page_cache_t)0x8000287U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_WT_NA_OSH ((okl4_page_cache_t)0x8000288U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWT_NA_OSH ((okl4_page_cache_t)0x8000289U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWT_NA_OSH ((okl4_page_cache_t)0x800028aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_NA_OSH ((okl4_page_cache_t)0x800028bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWT_NA_OSH ((okl4_page_cache_t)0x800028cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWT_NA_OSH ((okl4_page_cache_t)0x800028dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWT_NA_OSH ((okl4_page_cache_t)0x800028eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_NA_OSH ((okl4_page_cache_t)0x800028fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_WA_OSH ((okl4_page_cache_t)0x8000291U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_WA_OSH ((okl4_page_cache_t)0x8000292U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_OSH ((okl4_page_cache_t)0x8000293U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWT_WA_OSH ((okl4_page_cache_t)0x8000294U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_WA_OSH ((okl4_page_cache_t)0x8000295U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_WA_OSH ((okl4_page_cache_t)0x8000296U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_OSH ((okl4_page_cache_t)0x8000297U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWT_WA_OSH ((okl4_page_cache_t)0x8000298U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_WT_WA_OSH ((okl4_page_cache_t)0x8000299U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWT_WA_OSH ((okl4_page_cache_t)0x800029aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_WA_OSH ((okl4_page_cache_t)0x800029bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWT_WA_OSH ((okl4_page_cache_t)0x800029cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWT_WA_OSH ((okl4_page_cache_t)0x800029dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWT_WA_OSH ((okl4_page_cache_t)0x800029eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_WA_OSH ((okl4_page_cache_t)0x800029fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RA_OSH ((okl4_page_cache_t)0x80002a1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RA_OSH ((okl4_page_cache_t)0x80002a2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_OSH ((okl4_page_cache_t)0x80002a3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWT_RA_OSH ((okl4_page_cache_t)0x80002a4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RA_OSH ((okl4_page_cache_t)0x80002a5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RA_OSH ((okl4_page_cache_t)0x80002a6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_OSH ((okl4_page_cache_t)0x80002a7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RA_OSH ((okl4_page_cache_t)0x80002a8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RA_OSH ((okl4_page_cache_t)0x80002a9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_WT_RA_OSH ((okl4_page_cache_t)0x80002aaU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_RA_OSH ((okl4_page_cache_t)0x80002abU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RA_OSH ((okl4_page_cache_t)0x80002acU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RA_OSH ((okl4_page_cache_t)0x80002adU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RA_OSH ((okl4_page_cache_t)0x80002aeU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RA_OSH ((okl4_page_cache_t)0x80002afU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002b1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002b2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002b3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWT_RWA_OSH ((okl4_page_cache_t)0x80002b4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002b5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002b6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002b7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002b8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002b9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002baU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_WT_RWA_OSH ((okl4_page_cache_t)0x80002bbU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002bcU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002bdU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002beU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002bfU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_NA_OSH ((okl4_page_cache_t)0x80002c1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_NA_OSH ((okl4_page_cache_t)0x80002c2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_OSH ((okl4_page_cache_t)0x80002c3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWB_NA_OSH ((okl4_page_cache_t)0x80002c4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_NA_OSH ((okl4_page_cache_t)0x80002c5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_NA_OSH ((okl4_page_cache_t)0x80002c6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_OSH ((okl4_page_cache_t)0x80002c7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWB_NA_OSH ((okl4_page_cache_t)0x80002c8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWB_NA_OSH ((okl4_page_cache_t)0x80002c9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWB_NA_OSH ((okl4_page_cache_t)0x80002caU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_NA_OSH ((okl4_page_cache_t)0x80002cbU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_WB_NA_OSH ((okl4_page_cache_t)0x80002ccU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWB_NA_OSH ((okl4_page_cache_t)0x80002cdU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWB_NA_OSH ((okl4_page_cache_t)0x80002ceU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_NA_OSH ((okl4_page_cache_t)0x80002cfU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_WA_OSH ((okl4_page_cache_t)0x80002d1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_WA_OSH ((okl4_page_cache_t)0x80002d2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_OSH ((okl4_page_cache_t)0x80002d3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWB_WA_OSH ((okl4_page_cache_t)0x80002d4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_WA_OSH ((okl4_page_cache_t)0x80002d5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_WA_OSH ((okl4_page_cache_t)0x80002d6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_OSH ((okl4_page_cache_t)0x80002d7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWB_WA_OSH ((okl4_page_cache_t)0x80002d8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWB_WA_OSH ((okl4_page_cache_t)0x80002d9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWB_WA_OSH ((okl4_page_cache_t)0x80002daU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_WA_OSH ((okl4_page_cache_t)0x80002dbU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWB_WA_OSH ((okl4_page_cache_t)0x80002dcU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_WB_WA_OSH ((okl4_page_cache_t)0x80002ddU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWB_WA_OSH ((okl4_page_cache_t)0x80002deU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_WA_OSH ((okl4_page_cache_t)0x80002dfU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RA_OSH ((okl4_page_cache_t)0x80002e1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RA_OSH ((okl4_page_cache_t)0x80002e2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_OSH ((okl4_page_cache_t)0x80002e3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWB_RA_OSH ((okl4_page_cache_t)0x80002e4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RA_OSH ((okl4_page_cache_t)0x80002e5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RA_OSH ((okl4_page_cache_t)0x80002e6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_OSH ((okl4_page_cache_t)0x80002e7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RA_OSH ((okl4_page_cache_t)0x80002e8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RA_OSH ((okl4_page_cache_t)0x80002e9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RA_OSH ((okl4_page_cache_t)0x80002eaU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RA_OSH ((okl4_page_cache_t)0x80002ebU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RA_OSH ((okl4_page_cache_t)0x80002ecU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RA_OSH ((okl4_page_cache_t)0x80002edU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_WB_RA_OSH ((okl4_page_cache_t)0x80002eeU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_RA_OSH ((okl4_page_cache_t)0x80002efU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002f1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002f2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002f3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWB_RWA_OSH ((okl4_page_cache_t)0x80002f4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002f5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002f6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002f7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002f8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002f9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002faU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002fbU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002fcU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002fdU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002feU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_WB_RWA_OSH ((okl4_page_cache_t)0x80002ffU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_TWT_WA_ISH ((okl4_page_cache_t)0x8000311U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_ISH ((okl4_page_cache_t)0x8000312U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_ISH ((okl4_page_cache_t)0x8000313U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWT_WA_ISH ((okl4_page_cache_t)0x8000314U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_ISH ((okl4_page_cache_t)0x8000315U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_ISH ((okl4_page_cache_t)0x8000316U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_ISH ((okl4_page_cache_t)0x8000317U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_WA_ISH ((okl4_page_cache_t)0x8000318U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_WA_ISH ((okl4_page_cache_t)0x8000319U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_WA_ISH ((okl4_page_cache_t)0x800031aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_ISH ((okl4_page_cache_t)0x800031bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_WA_ISH ((okl4_page_cache_t)0x800031cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_WA_ISH ((okl4_page_cache_t)0x800031dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_WA_ISH ((okl4_page_cache_t)0x800031eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_ISH ((okl4_page_cache_t)0x800031fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_ISH ((okl4_page_cache_t)0x8000321U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_TWT_RA_ISH ((okl4_page_cache_t)0x8000322U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_ISH ((okl4_page_cache_t)0x8000323U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWT_RA_ISH ((okl4_page_cache_t)0x8000324U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_ISH ((okl4_page_cache_t)0x8000325U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_ISH ((okl4_page_cache_t)0x8000326U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_ISH ((okl4_page_cache_t)0x8000327U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RA_ISH ((okl4_page_cache_t)0x8000328U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RA_ISH ((okl4_page_cache_t)0x8000329U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RA_ISH ((okl4_page_cache_t)0x800032aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_ISH ((okl4_page_cache_t)0x800032bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RA_ISH ((okl4_page_cache_t)0x800032cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RA_ISH ((okl4_page_cache_t)0x800032dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RA_ISH ((okl4_page_cache_t)0x800032eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_ISH ((okl4_page_cache_t)0x800032fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_ISH ((okl4_page_cache_t)0x8000331U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_ISH ((okl4_page_cache_t)0x8000332U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_TWT_RWA_ISH ((okl4_page_cache_t)0x8000333U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWT_RWA_ISH ((okl4_page_cache_t)0x8000334U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_ISH ((okl4_page_cache_t)0x8000335U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_ISH ((okl4_page_cache_t)0x8000336U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_ISH ((okl4_page_cache_t)0x8000337U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_ISH ((okl4_page_cache_t)0x8000338U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_ISH ((okl4_page_cache_t)0x8000339U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_ISH ((okl4_page_cache_t)0x800033aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_ISH ((okl4_page_cache_t)0x800033bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_ISH ((okl4_page_cache_t)0x800033cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_ISH ((okl4_page_cache_t)0x800033dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_ISH ((okl4_page_cache_t)0x800033eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_ISH ((okl4_page_cache_t)0x800033fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_ONC_ISH ((okl4_page_cache_t)0x8000341U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_ONC_ISH ((okl4_page_cache_t)0x8000342U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_ONC_ISH ((okl4_page_cache_t)0x8000343U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_NC_ISH) */
+#define OKL4_PAGE_CACHE_HW_NC_ISH ((okl4_page_cache_t)0x8000344U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_ONC_ISH ((okl4_page_cache_t)0x8000345U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_ONC_ISH ((okl4_page_cache_t)0x8000346U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_ONC_ISH ((okl4_page_cache_t)0x8000347U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_ONC_ISH ((okl4_page_cache_t)0x8000348U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_ONC_ISH ((okl4_page_cache_t)0x8000349U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_ONC_ISH ((okl4_page_cache_t)0x800034aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_ONC_ISH ((okl4_page_cache_t)0x800034bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_ONC_ISH ((okl4_page_cache_t)0x800034cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_ONC_ISH ((okl4_page_cache_t)0x800034dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_ONC_ISH ((okl4_page_cache_t)0x800034eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_ONC_ISH ((okl4_page_cache_t)0x800034fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_ISH ((okl4_page_cache_t)0x8000351U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_ISH ((okl4_page_cache_t)0x8000352U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_ISH ((okl4_page_cache_t)0x8000353U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWB_WA_ISH ((okl4_page_cache_t)0x8000354U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_TWB_WA_ISH ((okl4_page_cache_t)0x8000355U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_ISH ((okl4_page_cache_t)0x8000356U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_ISH ((okl4_page_cache_t)0x8000357U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_WA_ISH ((okl4_page_cache_t)0x8000358U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_WA_ISH ((okl4_page_cache_t)0x8000359U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_WA_ISH ((okl4_page_cache_t)0x800035aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_ISH ((okl4_page_cache_t)0x800035bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_WA_ISH ((okl4_page_cache_t)0x800035cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_WA_ISH ((okl4_page_cache_t)0x800035dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_WA_ISH ((okl4_page_cache_t)0x800035eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_ISH ((okl4_page_cache_t)0x800035fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_ISH ((okl4_page_cache_t)0x8000361U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_ISH ((okl4_page_cache_t)0x8000362U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_ISH ((okl4_page_cache_t)0x8000363U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWB_RA_ISH ((okl4_page_cache_t)0x8000364U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_ISH ((okl4_page_cache_t)0x8000365U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_TWB_RA_ISH ((okl4_page_cache_t)0x8000366U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_ISH ((okl4_page_cache_t)0x8000367U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RA_ISH ((okl4_page_cache_t)0x8000368U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RA_ISH ((okl4_page_cache_t)0x8000369U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RA_ISH ((okl4_page_cache_t)0x800036aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_ISH ((okl4_page_cache_t)0x800036bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RA_ISH ((okl4_page_cache_t)0x800036cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RA_ISH ((okl4_page_cache_t)0x800036dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RA_ISH ((okl4_page_cache_t)0x800036eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_ISH ((okl4_page_cache_t)0x800036fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_ISH ((okl4_page_cache_t)0x8000371U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_ISH ((okl4_page_cache_t)0x8000372U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_ISH ((okl4_page_cache_t)0x8000373U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWB_RWA_ISH ((okl4_page_cache_t)0x8000374U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_ISH ((okl4_page_cache_t)0x8000375U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_ISH ((okl4_page_cache_t)0x8000376U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_TWB_RWA_ISH ((okl4_page_cache_t)0x8000377U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_ISH ((okl4_page_cache_t)0x8000378U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_ISH ((okl4_page_cache_t)0x8000379U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_ISH ((okl4_page_cache_t)0x800037aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_ISH ((okl4_page_cache_t)0x800037bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_ISH ((okl4_page_cache_t)0x800037cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_ISH ((okl4_page_cache_t)0x800037dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_ISH ((okl4_page_cache_t)0x800037eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_ISH ((okl4_page_cache_t)0x800037fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_NA_ISH ((okl4_page_cache_t)0x8000381U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_NA_ISH ((okl4_page_cache_t)0x8000382U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_ISH ((okl4_page_cache_t)0x8000383U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWT_NA_ISH ((okl4_page_cache_t)0x8000384U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_NA_ISH ((okl4_page_cache_t)0x8000385U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_NA_ISH ((okl4_page_cache_t)0x8000386U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_ISH ((okl4_page_cache_t)0x8000387U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_WT_NA_ISH ((okl4_page_cache_t)0x8000388U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWT_NA_ISH ((okl4_page_cache_t)0x8000389U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWT_NA_ISH ((okl4_page_cache_t)0x800038aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_NA_ISH ((okl4_page_cache_t)0x800038bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWT_NA_ISH ((okl4_page_cache_t)0x800038cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWT_NA_ISH ((okl4_page_cache_t)0x800038dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWT_NA_ISH ((okl4_page_cache_t)0x800038eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_NA_ISH ((okl4_page_cache_t)0x800038fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_WA_ISH ((okl4_page_cache_t)0x8000391U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_WA_ISH ((okl4_page_cache_t)0x8000392U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_ISH ((okl4_page_cache_t)0x8000393U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWT_WA_ISH ((okl4_page_cache_t)0x8000394U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_WA_ISH ((okl4_page_cache_t)0x8000395U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_WA_ISH ((okl4_page_cache_t)0x8000396U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_ISH ((okl4_page_cache_t)0x8000397U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWT_WA_ISH ((okl4_page_cache_t)0x8000398U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_WT_WA_ISH ((okl4_page_cache_t)0x8000399U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWT_WA_ISH ((okl4_page_cache_t)0x800039aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_WA_ISH ((okl4_page_cache_t)0x800039bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWT_WA_ISH ((okl4_page_cache_t)0x800039cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWT_WA_ISH ((okl4_page_cache_t)0x800039dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWT_WA_ISH ((okl4_page_cache_t)0x800039eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_WA_ISH ((okl4_page_cache_t)0x800039fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RA_ISH ((okl4_page_cache_t)0x80003a1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RA_ISH ((okl4_page_cache_t)0x80003a2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_ISH ((okl4_page_cache_t)0x80003a3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWT_RA_ISH ((okl4_page_cache_t)0x80003a4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RA_ISH ((okl4_page_cache_t)0x80003a5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RA_ISH ((okl4_page_cache_t)0x80003a6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_ISH ((okl4_page_cache_t)0x80003a7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RA_ISH ((okl4_page_cache_t)0x80003a8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RA_ISH ((okl4_page_cache_t)0x80003a9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_WT_RA_ISH ((okl4_page_cache_t)0x80003aaU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_RA_ISH ((okl4_page_cache_t)0x80003abU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RA_ISH ((okl4_page_cache_t)0x80003acU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RA_ISH ((okl4_page_cache_t)0x80003adU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RA_ISH ((okl4_page_cache_t)0x80003aeU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RA_ISH ((okl4_page_cache_t)0x80003afU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003b1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003b2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003b3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWT_RWA_ISH ((okl4_page_cache_t)0x80003b4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003b5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003b6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003b7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003b8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003b9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003baU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_WT_RWA_ISH ((okl4_page_cache_t)0x80003bbU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003bcU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003bdU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003beU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003bfU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_NA_ISH ((okl4_page_cache_t)0x80003c1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_NA_ISH ((okl4_page_cache_t)0x80003c2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_ISH ((okl4_page_cache_t)0x80003c3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWB_NA_ISH ((okl4_page_cache_t)0x80003c4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_NA_ISH ((okl4_page_cache_t)0x80003c5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_NA_ISH ((okl4_page_cache_t)0x80003c6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_ISH ((okl4_page_cache_t)0x80003c7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWB_NA_ISH ((okl4_page_cache_t)0x80003c8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWB_NA_ISH ((okl4_page_cache_t)0x80003c9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWB_NA_ISH ((okl4_page_cache_t)0x80003caU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_NA_ISH ((okl4_page_cache_t)0x80003cbU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_WB_NA_ISH ((okl4_page_cache_t)0x80003ccU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWB_NA_ISH ((okl4_page_cache_t)0x80003cdU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWB_NA_ISH ((okl4_page_cache_t)0x80003ceU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_NA_ISH ((okl4_page_cache_t)0x80003cfU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_WA_ISH ((okl4_page_cache_t)0x80003d1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_WA_ISH ((okl4_page_cache_t)0x80003d2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_ISH ((okl4_page_cache_t)0x80003d3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWB_WA_ISH ((okl4_page_cache_t)0x80003d4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_WA_ISH ((okl4_page_cache_t)0x80003d5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_WA_ISH ((okl4_page_cache_t)0x80003d6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_ISH ((okl4_page_cache_t)0x80003d7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWB_WA_ISH ((okl4_page_cache_t)0x80003d8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWB_WA_ISH ((okl4_page_cache_t)0x80003d9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWB_WA_ISH ((okl4_page_cache_t)0x80003daU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_WA_ISH ((okl4_page_cache_t)0x80003dbU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWB_WA_ISH ((okl4_page_cache_t)0x80003dcU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_WB_WA_ISH ((okl4_page_cache_t)0x80003ddU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWB_WA_ISH ((okl4_page_cache_t)0x80003deU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_WA_ISH ((okl4_page_cache_t)0x80003dfU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RA_ISH ((okl4_page_cache_t)0x80003e1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RA_ISH ((okl4_page_cache_t)0x80003e2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_ISH ((okl4_page_cache_t)0x80003e3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWB_RA_ISH ((okl4_page_cache_t)0x80003e4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RA_ISH ((okl4_page_cache_t)0x80003e5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RA_ISH ((okl4_page_cache_t)0x80003e6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_ISH ((okl4_page_cache_t)0x80003e7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RA_ISH ((okl4_page_cache_t)0x80003e8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RA_ISH ((okl4_page_cache_t)0x80003e9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RA_ISH ((okl4_page_cache_t)0x80003eaU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RA_ISH ((okl4_page_cache_t)0x80003ebU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RA_ISH ((okl4_page_cache_t)0x80003ecU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RA_ISH ((okl4_page_cache_t)0x80003edU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_WB_RA_ISH ((okl4_page_cache_t)0x80003eeU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_RA_ISH ((okl4_page_cache_t)0x80003efU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003f1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003f2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003f3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWB_RWA_ISH ((okl4_page_cache_t)0x80003f4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003f5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003f6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003f7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003f8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003f9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003faU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003fbU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003fcU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003fdU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003feU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_WB_RWA_ISH ((okl4_page_cache_t)0x80003ffU)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_PAGE_CACHE_MAX) */
+#define OKL4_PAGE_CACHE_MAX ((okl4_page_cache_t)0x80003ffU)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_PAGE_CACHE_INVALID) */
+#define OKL4_PAGE_CACHE_INVALID ((okl4_page_cache_t)0xffffffffU)
+
+/*lint -esym(714, okl4_page_cache_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_page_cache_is_element_of(okl4_page_cache_t var);
+
+
+/*lint -esym(714, okl4_page_cache_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_page_cache_is_element_of(okl4_page_cache_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((var == OKL4_PAGE_CACHE_WRITECOMBINE) ||
+            (var == OKL4_PAGE_CACHE_DEFAULT) ||
+            (var == OKL4_PAGE_CACHE_IPC_RX) ||
+            (var == OKL4_PAGE_CACHE_IPC_TX) ||
+            (var == OKL4_PAGE_CACHE_TRACEBUFFER) ||
+            (var == OKL4_PAGE_CACHE_WRITEBACK) ||
+            (var == OKL4_PAGE_CACHE_IWB_RWA_ONC) ||
+            (var == OKL4_PAGE_CACHE_WRITETHROUGH) ||
+            (var == OKL4_PAGE_CACHE_DEVICE_GRE) ||
+            (var == OKL4_PAGE_CACHE_DEVICE_NGRE) ||
+            (var == OKL4_PAGE_CACHE_DEVICE) ||
+            (var == OKL4_PAGE_CACHE_STRONG) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_WB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_ONC_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_ONC_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_ONC_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWT_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_ONC_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_ONC_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_WT_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_ONC_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_ONC_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWT_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_ONC_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_ONC_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_DEVICE_NGNRE) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWT_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_DEVICE_GRE) ||
+            (var == OKL4_PAGE_CACHE_HW_TWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_WT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_WB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWB_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWB_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWB_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWT_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_WT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_ONC_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_ONC_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_NC_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWT_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_ONC_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_ONC_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_ONC_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_ONC_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_ONC_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWB_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_WT_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_ONC_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWB_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_ONC_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_WB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_ONC_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWB_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWT_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_ONC_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_WB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_WT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWB_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_ONC_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_ONC_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWT_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWB_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_ONC_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWT_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_ONC_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWT_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWB_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWB_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_WB_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_DEVICE_NGNRNE) ||
+            (var == OKL4_PAGE_CACHE_HW_WB_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_WB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWT_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_ONC_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_WT_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_ONC_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_ONC_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_ONC_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_WB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_ONC_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_ONC_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_ONC_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_ONC_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWT_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWT_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWT_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_WB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWB_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_ONC_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWT_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWB_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWB_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_WB_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_ONC_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_DEVICE_NGRE) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_ONC_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_WB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWB_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_ONC_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWT_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_WT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_NC_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWB_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWB_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_WT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_ONC_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_WB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_ONC_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_NC_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_WT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_ONC_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_WT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_ONC_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWT_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_WT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWT_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_ONC_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_ONC_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWT_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_WT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWB_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWB_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_MASK));
+}
+
+
+
+typedef uint32_t okl4_cell_id_t;
+
+
+
+
+
+typedef char okl4_char_t;
+
+
+
+
+
+
+
+
+/**
+    The `okl4_string_t` type represents a constant C string of type
+    'const char *'.
+*/
+
+typedef const okl4_char_t *okl4_string_t;
+
+
+
+
+
+
+
+
+
+
+
+
+/**
+
+*/
+
+struct okl4_range_item {
+    okl4_laddr_t base;
+    okl4_lsize_t size;
+};
+
+
+
+
+/**
+
+*/
+
+struct okl4_virtmem_item {
+    struct okl4_range_item range;
+};
+
+
+
+
+/**
+
+*/
+
+struct okl4_cell_management_item {
+    okl4_laddr_t entry;
+    struct okl4_virtmem_item mapping_range;
+    __ptr64(void *, data);
+    __ptr64(okl4_string_t, image);
+    okl4_kcap_t mmu;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+    __ptr64(okl4_string_t, name);
+    okl4_kcap_t registers_cap;
+    okl4_kcap_t reset_virq;
+    okl4_count_t segment_index;
+    _okl4_padding_t __padding4_4;
+    _okl4_padding_t __padding5_5;
+    _okl4_padding_t __padding6_6;
+    _okl4_padding_t __padding7_7;
+    __ptr64(struct okl4_cell_management_segments *, segments);
+    __ptr64(struct okl4_cell_management_vcpus *, vcpus);
+    okl4_bool_t boot_once;
+    okl4_bool_t can_stop;
+    okl4_bool_t deferred;
+    okl4_bool_t detached;
+    okl4_bool_t erase;
+    _okl4_padding_t __padding8_5;
+    _okl4_padding_t __padding9_6;
+    _okl4_padding_t __padding10_7;
+    okl4_laddr_t dtb_address;
+};
+
+
+
+
+/**
+
+*/
+
+struct okl4_cell_management {
+    okl4_count_t num_items;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+    struct okl4_cell_management_item items[]; /*lint --e{9038} flex array */
+};
+
+
+
+
+/**
+    The `okl4_paddr_t` type represents an unsigned integer value which is large
+    enough to contain a machine-native physical address.
+*/
+
+typedef okl4_psize_t okl4_paddr_t;
+
+
+
+
+/**
+
+*/
+
+struct okl4_segment_mapping {
+    okl4_paddr_t phys_addr;
+    okl4_psize_t size;
+    okl4_laddr_t virt_addr;
+    okl4_kcap_t cap;
+    okl4_bool_t device;
+    okl4_bool_t owned;
+    _okl4_padding_t __padding0_6;
+    _okl4_padding_t __padding1_7;
+};
+
+
+
+
+/**
+
+*/
+
+struct okl4_cell_management_segments {
+    okl4_count_t free_segments;
+    okl4_count_t num_segments;
+    struct okl4_segment_mapping segment_mappings[]; /*lint --e{9038} flex array */
+};
+
+
+
+
+/**
+
+*/
+
+struct okl4_cell_management_vcpus {
+    okl4_count_t num_vcpus;
+    okl4_kcap_t vcpu_caps[]; /*lint --e{9038} flex array */
+};
+
+
+
+
+/**
+    CPU instruction set
+*/
+
+typedef uint32_t okl4_cpu_exec_mode;
+
+/*lint -esym(621, OKL4_ARM_MODE) */
+#define OKL4_ARM_MODE ((okl4_cpu_exec_mode)(0U))
+
+/*lint -esym(621, OKL4_DEFAULT_MODE) */
+#define OKL4_DEFAULT_MODE ((okl4_cpu_exec_mode)(4U))
+
+/*lint -esym(621, OKL4_JAZELLE_MODE) */
+#define OKL4_JAZELLE_MODE ((okl4_cpu_exec_mode)(2U))
+
+/*lint -esym(621, OKL4_THUMBEE_MODE) */
+#define OKL4_THUMBEE_MODE ((okl4_cpu_exec_mode)(3U))
+
+/*lint -esym(621, OKL4_THUMB_MODE) */
+#define OKL4_THUMB_MODE ((okl4_cpu_exec_mode)(1U))
+
+
+
+/**
+    CPU mode specifier
+
+    - BITS 2..0 -   @ref OKL4_MASK_EXEC_MODE_CPU_MODE
+    - BIT 7 -   @ref OKL4_MASK_ENDIAN_CPU_MODE
+*/
+
+/*lint -esym(621, okl4_cpu_mode_t) */
+typedef uint32_t okl4_cpu_mode_t;
+
+/*lint -esym(621, okl4_cpu_mode_getexecmode) */
+/*lint -esym(714, okl4_cpu_mode_getexecmode) */
+OKL4_FORCE_INLINE okl4_cpu_exec_mode
+okl4_cpu_mode_getexecmode(const okl4_cpu_mode_t *x);
+
+/*lint -esym(621, okl4_cpu_mode_setexecmode) */
+OKL4_FORCE_INLINE void
+okl4_cpu_mode_setexecmode(okl4_cpu_mode_t *x, okl4_cpu_exec_mode _exec_mode);
+
+/*lint -esym(621, okl4_cpu_mode_getendian) */
+/*lint -esym(714, okl4_cpu_mode_getendian) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_cpu_mode_getendian(const okl4_cpu_mode_t *x);
+
+/*lint -esym(621, okl4_cpu_mode_setendian) */
+OKL4_FORCE_INLINE void
+okl4_cpu_mode_setendian(okl4_cpu_mode_t *x, okl4_bool_t _endian);
+
+/*lint -esym(714, okl4_cpu_mode_init) */
+OKL4_FORCE_INLINE void
+okl4_cpu_mode_init(okl4_cpu_mode_t *x);
+
+/*lint -esym(714, okl4_cpu_mode_cast) */
+OKL4_FORCE_INLINE okl4_cpu_mode_t
+okl4_cpu_mode_cast(uint32_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_CPU_MODE_EXEC_MODE_MASK) */
+#define OKL4_CPU_MODE_EXEC_MODE_MASK ((okl4_cpu_mode_t)7U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_EXEC_MODE_CPU_MODE) */
+#define OKL4_MASK_EXEC_MODE_CPU_MODE ((okl4_cpu_mode_t)7U)
+/*lint -esym(621, OKL4_SHIFT_EXEC_MODE_CPU_MODE) */
+#define OKL4_SHIFT_EXEC_MODE_CPU_MODE (0)
+/*lint -esym(621, OKL4_WIDTH_EXEC_MODE_CPU_MODE) */
+#define OKL4_WIDTH_EXEC_MODE_CPU_MODE (3)
+/*lint -esym(621, OKL4_CPU_MODE_ENDIAN_MASK) */
+#define OKL4_CPU_MODE_ENDIAN_MASK ((okl4_cpu_mode_t)1U << 7) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_ENDIAN_CPU_MODE) */
+#define OKL4_MASK_ENDIAN_CPU_MODE ((okl4_cpu_mode_t)1U << 7)
+/*lint -esym(621, OKL4_SHIFT_ENDIAN_CPU_MODE) */
+#define OKL4_SHIFT_ENDIAN_CPU_MODE (7)
+/*lint -esym(621, OKL4_WIDTH_ENDIAN_CPU_MODE) */
+#define OKL4_WIDTH_ENDIAN_CPU_MODE (1)
+
+
+/*lint -sem(okl4_cpu_mode_getexecmode, 1p, @n >= 0 && @n <= 7) */
+/*lint -esym(621, okl4_cpu_mode_getexecmode) */
+/*lint -esym(714, okl4_cpu_mode_getexecmode) */
+OKL4_FORCE_INLINE okl4_cpu_exec_mode
+okl4_cpu_mode_getexecmode(const okl4_cpu_mode_t *x)
+{
+    okl4_cpu_exec_mode field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 3;
+        } bits;
+        okl4_cpu_mode_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_cpu_exec_mode)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_cpu_mode_setexecmode, 2n >= 0 && 2n <= 7) */
+/*lint -esym(714, okl4_cpu_mode_setexecmode) */
+
+/*lint -esym(621, okl4_cpu_mode_setexecmode) */
+OKL4_FORCE_INLINE void
+okl4_cpu_mode_setexecmode(okl4_cpu_mode_t *x, okl4_cpu_exec_mode _exec_mode)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 3;
+        } bits;
+        okl4_cpu_mode_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_exec_mode;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_cpu_mode_getendian, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_cpu_mode_getendian) */
+/*lint -esym(714, okl4_cpu_mode_getendian) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_cpu_mode_getendian(const okl4_cpu_mode_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 7;
+            _Bool field : 1;
+        } bits;
+        okl4_cpu_mode_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_cpu_mode_setendian, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_cpu_mode_setendian) */
+
+/*lint -esym(621, okl4_cpu_mode_setendian) */
+OKL4_FORCE_INLINE void
+okl4_cpu_mode_setendian(okl4_cpu_mode_t *x, okl4_bool_t _endian)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 7;
+            _Bool field : 1;
+        } bits;
+        okl4_cpu_mode_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_endian;
+    *x = _conv.raw;
+}
+/*lint -esym(714, okl4_cpu_mode_init) */
+OKL4_FORCE_INLINE void
+okl4_cpu_mode_init(okl4_cpu_mode_t *x)
+{
+    *x = (okl4_cpu_mode_t)0U;
+}
+
+/*lint -esym(714, okl4_cpu_mode_cast) */
+OKL4_FORCE_INLINE okl4_cpu_mode_t
+okl4_cpu_mode_cast(uint32_t p, okl4_bool_t force)
+{
+    okl4_cpu_mode_t x = (okl4_cpu_mode_t)p;
+    (void)force;
+    return x;
+}
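+
+/*
+ * Illustrative sketch, not part of the generated header: how the
+ * okl4_cpu_mode_t accessors above compose.  Only the okl4_cpu_mode_*
+ * functions and the OKL4_*_MODE constants come from this header; the
+ * wrapper function name and the OKL4_TYPES_EXAMPLES guard are
+ * hypothetical.
+ */
+#if defined(OKL4_TYPES_EXAMPLES)
+OKL4_FORCE_INLINE okl4_cpu_mode_t
+example_make_cpu_mode(void)
+{
+    okl4_cpu_mode_t mode;
+
+    okl4_cpu_mode_init(&mode);                         /* all fields zeroed */
+    okl4_cpu_mode_setexecmode(&mode, OKL4_THUMB_MODE); /* exec mode, bits 2..0 */
+    okl4_cpu_mode_setendian(&mode, (okl4_bool_t)0);    /* clear endian flag, bit 7 */
+    return mode;
+}
+#endif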
+
+
+
+
+struct _okl4_env_hdr {
+    uint16_t magic;
+    uint16_t count;
+};
+
+
+
+
+
+
+
+struct _okl4_env_item {
+    __ptr64(okl4_string_t, name);
+    __ptr64(void *, item);
+};
+
+
+
+
+
+
+/**
+    The OKL4 environment.  It is a dictionary that maps strings to
+    arbitrary objects.  The content of the environment is defined
+    during system construction time, and is read-only during run
+    time.
+*/
+
+struct _okl4_env {
+    struct _okl4_env_hdr env_hdr;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+    struct _okl4_env_item env_item[]; /*lint --e{9038} flex array */
+};
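+
+/*
+ * Illustrative sketch, not part of the generated header: a linear
+ * lookup over the environment dictionary described above.  It assumes
+ * that __ptr64() members can be dereferenced directly as pointers and
+ * that strcmp() is available; the function name and the
+ * OKL4_TYPES_EXAMPLES guard are hypothetical.
+ */
+#if defined(OKL4_TYPES_EXAMPLES)
+#include <string.h>
+
+static void *
+example_env_lookup(const struct _okl4_env *env, okl4_string_t name)
+{
+    uint16_t i;
+
+    for (i = 0; i < env->env_hdr.count; i++) {
+        if (strcmp(env->env_item[i].name, name) == 0) {
+            return env->env_item[i].item; /* opaque object bound to this key */
+        }
+    }
+    return (void *)0; /* key not present in the environment */
+}
+#endif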
+
+
+
+
+/**
+
+*/
+
+struct okl4_env_access_cell {
+    __ptr64(okl4_string_t, name);
+    okl4_count_t num_entries;
+    okl4_count_t start_entry;
+};
+
+
+
+
+/**
+    The okl4_page_perms_t object represents a set of access permissions for
+    page mappings.
+
+    - @ref OKL4_PAGE_PERMS_NONE
+    - @ref OKL4_PAGE_PERMS_X
+    - @ref OKL4_PAGE_PERMS_W
+    - @ref OKL4_PAGE_PERMS_WX
+    - @ref OKL4_PAGE_PERMS_R
+    - @ref OKL4_PAGE_PERMS_RX
+    - @ref OKL4_PAGE_PERMS_RW
+    - @ref OKL4_PAGE_PERMS_RWX
+    - @ref OKL4_PAGE_PERMS_MAX
+    - @ref OKL4_PAGE_PERMS_INVALID
+*/
+
+typedef uint32_t okl4_page_perms_t;
+
+/*lint -esym(621, OKL4_PAGE_PERMS_NONE) */
+#define OKL4_PAGE_PERMS_NONE ((okl4_page_perms_t)0x0U)
+/*lint -esym(621, OKL4_PAGE_PERMS_X) */
+#define OKL4_PAGE_PERMS_X ((okl4_page_perms_t)0x1U)
+/*lint -esym(621, OKL4_PAGE_PERMS_W) */
+#define OKL4_PAGE_PERMS_W ((okl4_page_perms_t)0x2U)
+/*lint -esym(621, OKL4_PAGE_PERMS_WX) */
+#define OKL4_PAGE_PERMS_WX ((okl4_page_perms_t)0x3U)
+/*lint -esym(621, OKL4_PAGE_PERMS_R) */
+#define OKL4_PAGE_PERMS_R ((okl4_page_perms_t)0x4U)
+/*lint -esym(621, OKL4_PAGE_PERMS_RX) */
+#define OKL4_PAGE_PERMS_RX ((okl4_page_perms_t)0x5U)
+/*lint -esym(621, OKL4_PAGE_PERMS_RW) */
+#define OKL4_PAGE_PERMS_RW ((okl4_page_perms_t)0x6U)
+/*lint -esym(621, OKL4_PAGE_PERMS_RWX) */
+#define OKL4_PAGE_PERMS_RWX ((okl4_page_perms_t)0x7U)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_PAGE_PERMS_MAX) */
+#define OKL4_PAGE_PERMS_MAX ((okl4_page_perms_t)0x7U)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_PAGE_PERMS_INVALID) */
+#define OKL4_PAGE_PERMS_INVALID ((okl4_page_perms_t)0xffffffffU)
+
+/*lint -esym(714, okl4_page_perms_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_page_perms_is_element_of(okl4_page_perms_t var);
+
+
+/*lint -esym(714, okl4_page_perms_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_page_perms_is_element_of(okl4_page_perms_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((var == OKL4_PAGE_PERMS_NONE) ||
+            (var == OKL4_PAGE_PERMS_X) ||
+            (var == OKL4_PAGE_PERMS_W) ||
+            (var == OKL4_PAGE_PERMS_WX) ||
+            (var == OKL4_PAGE_PERMS_R) ||
+            (var == OKL4_PAGE_PERMS_RX) ||
+            (var == OKL4_PAGE_PERMS_RW) ||
+            (var == OKL4_PAGE_PERMS_RWX));
+}
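+
+/*
+ * Illustrative sketch, not part of the generated header: the
+ * okl4_page_perms_t values above form a bitmask (R = 0x4, W = 0x2,
+ * X = 0x1), so an individual right can be tested with a bitwise AND.
+ * The helper name and the OKL4_TYPES_EXAMPLES guard are hypothetical.
+ */
+#if defined(OKL4_TYPES_EXAMPLES)
+static okl4_bool_t
+example_perms_allow_write(okl4_page_perms_t perms)
+{
+    return (okl4_bool_t)((perms & OKL4_PAGE_PERMS_W) != 0U);
+}
+#endif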
+
+
+/**
+
+*/
+
+struct okl4_env_access_entry {
+    okl4_laddr_t virtual_address;
+    okl4_psize_t offset;
+    okl4_psize_t size;
+    okl4_count_t num_segs;
+    okl4_count_t segment_index;
+    okl4_page_cache_t cache_attrs;
+    okl4_page_perms_t permissions;
+    __ptr64(okl4_string_t, object_name);
+};
+
+
+
+
+/**
+
+*/
+
+struct okl4_env_access_table {
+    okl4_count_t num_cells;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+    __ptr64(struct okl4_env_access_cell *, cells);
+    __ptr64(struct okl4_env_access_entry *, entries);
+};
+
+
+
+
+/**
+    This object contains command-line arguments passed to
+    user-level programs.
+*/
+
+struct okl4_env_args {
+    okl4_count_t argc;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+    __ptr64_array(okl4_string_t, argv)[]; /*lint --e{9038} flex array */
+};
+
+
+
+
+/**
+    The okl4_env_interrupt_device_map_t type represents a list of interrupt
+    numbers (IRQs) that are connected to a given peripheral
+    device.  Objects of this type are typically obtained from
+    the OKL4 environment.
+*/
+
+struct okl4_env_interrupt_device_map {
+    okl4_count_t num_entries;
+    okl4_interrupt_number_t entries[]; /*lint --e{9038} flex array */
+};
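+
+/*
+ * Illustrative sketch, not part of the generated header: walking the
+ * flexible entries[] array of a device interrupt map, for example to
+ * register every IRQ of a peripheral.  The function and callback names
+ * and the OKL4_TYPES_EXAMPLES guard are hypothetical.
+ */
+#if defined(OKL4_TYPES_EXAMPLES)
+static void
+example_for_each_device_irq(const struct okl4_env_interrupt_device_map *map,
+                            void (*fn)(okl4_interrupt_number_t irq))
+{
+    okl4_count_t i;
+
+    for (i = 0; i < map->num_entries; i++) {
+        fn(map->entries[i]); /* one IRQ line of the peripheral */
+    }
+}
+#endif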
+
+
+
+
+/**
+    The okl4_interrupt_t structure is used to represent a kernel interrupt
+    object.
+*/
+
+struct okl4_interrupt {
+    okl4_kcap_t kcap;
+};
+
+
+
+
+/**
+    The okl4_env_interrupt_handle_t type stores the information required to
+    perform operations on an interrupt.
+*/
+
+struct okl4_env_interrupt_handle {
+    okl4_interrupt_number_t descriptor;
+    struct okl4_interrupt interrupt;
+};
+
+
+
+
+/**
+    The okl4_env_interrupt_list_t type stores a list of interrupt handle objects
+    which represent all the interrupts that are available to the cell.
+    Objects of this type are typically obtained from
+    the OKL4 environment.
+*/
+
+struct okl4_env_interrupt_list {
+    okl4_count_t num_entries;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+    __ptr64(okl4_interrupt_number_t *, descriptor);
+    __ptr64(struct okl4_interrupt *, interrupt);
+};
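+
+/*
+ * Illustrative sketch, not part of the generated header: `descriptor`
+ * and `interrupt` above are read here as parallel arrays of length
+ * num_entries, so the kernel interrupt object for a given IRQ number is
+ * found by index.  Direct dereference of the __ptr64() members and the
+ * function name / OKL4_TYPES_EXAMPLES guard are assumptions.
+ */
+#if defined(OKL4_TYPES_EXAMPLES)
+static const struct okl4_interrupt *
+example_find_interrupt(const struct okl4_env_interrupt_list *list,
+                       okl4_interrupt_number_t irq)
+{
+    okl4_count_t i;
+
+    for (i = 0; i < list->num_entries; i++) {
+        if (list->descriptor[i] == irq) {
+            return &list->interrupt[i]; /* holds the kcap for this IRQ */
+        }
+    }
+    return (const struct okl4_interrupt *)0; /* IRQ not available to this cell */
+}
+#endif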
+
+
+
+
+/**
+
+*/
+
+struct okl4_env_profile_cell {
+    okl4_char_t name[32];
+    okl4_count_t num_cores;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+    __ptr64(struct okl4_env_profile_cpu *, core);
+};
+
+
+
+
+/**
+
+*/
+
+struct okl4_env_profile_cpu {
+    okl4_kcap_t cap;
+};
+
+
+
+
+/**
+
+*/
+
+struct okl4_env_profile_table {
+    okl4_count_t num_cell_entries;
+    okl4_count_t pcpu_cell_entry;
+    __ptr64(struct okl4_env_profile_cell *, cells);
+};
+
+
+
+
+/**
+
+*/
+
+struct okl4_env_segment {
+    okl4_paddr_t base;
+    okl4_psize_t size;
+    okl4_kcap_t cap_id;
+    okl4_page_perms_t rwx;
+};
+
+
+
+
+/**
+
+*/
+
+struct okl4_env_segment_table {
+    okl4_count_t num_segments;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+    struct okl4_env_segment segments[]; /*lint --e{9038} flex array */
+};
+
+
+
+
+/**
+    The `okl4_error_t` type represents an error condition returned by the
+    OKL4 API.
+
+    See OKL4_ERROR_*
+
+    - @ref OKL4_ERROR_KSP_OK
+    - @ref OKL4_ERROR_OK
+    - @ref OKL4_ERROR_ALREADY_STARTED
+    - @ref OKL4_ERROR_ALREADY_STOPPED
+    - @ref OKL4_ERROR_AXON_AREA_TOO_BIG
+    - @ref OKL4_ERROR_AXON_BAD_MESSAGE_SIZE
+    - @ref OKL4_ERROR_AXON_INVALID_OFFSET
+    - @ref OKL4_ERROR_AXON_QUEUE_NOT_MAPPED
+    - @ref OKL4_ERROR_AXON_QUEUE_NOT_READY
+    - @ref OKL4_ERROR_AXON_TRANSFER_LIMIT_EXCEEDED
+    - @ref OKL4_ERROR_CANCELLED
+    - @ref OKL4_ERROR_EXISTING_MAPPING
+    - @ref OKL4_ERROR_INSUFFICIENT_SEGMENT_RIGHTS
+    - @ref OKL4_ERROR_INTERRUPTED
+    - @ref OKL4_ERROR_INTERRUPT_ALREADY_ATTACHED
+    - @ref OKL4_ERROR_INTERRUPT_INVALID_IRQ
+    - @ref OKL4_ERROR_INTERRUPT_NOT_ATTACHED
+    - @ref OKL4_ERROR_INVALID_ARGUMENT
+    - @ref OKL4_ERROR_INVALID_DESIGNATOR
+    - @ref OKL4_ERROR_INVALID_POWER_STATE
+    - @ref OKL4_ERROR_INVALID_SEGMENT_INDEX
+    - @ref OKL4_ERROR_MEMORY_FAULT
+    - @ref OKL4_ERROR_MISSING_MAPPING
+    - @ref OKL4_ERROR_NON_EMPTY_MMU_CONTEXT
+    - @ref OKL4_ERROR_NOT_IN_SEGMENT
+    - @ref OKL4_ERROR_NOT_LAST_CPU
+    - @ref OKL4_ERROR_NO_RESOURCES
+    - @ref OKL4_ERROR_PIPE_BAD_STATE
+    - @ref OKL4_ERROR_PIPE_EMPTY
+    - @ref OKL4_ERROR_PIPE_FULL
+    - @ref OKL4_ERROR_PIPE_NOT_READY
+    - @ref OKL4_ERROR_PIPE_RECV_OVERFLOW
+    - @ref OKL4_ERROR_POWER_VCPU_RESUMED
+    - @ref OKL4_ERROR_SEGMENT_USED
+    - @ref OKL4_ERROR_THREAD_ALREADY_WATCHING_SUSPENDED
+    - @ref OKL4_ERROR_TIMER_ACTIVE
+    - @ref OKL4_ERROR_TIMER_CANCELLED
+    - @ref OKL4_ERROR_TRY_AGAIN
+    - @ref OKL4_ERROR_WOULD_BLOCK
+    - @ref OKL4_ERROR_ALLOC_EXHAUSTED
+    - @ref OKL4_ERROR_KSP_ERROR_0
+    - @ref OKL4_ERROR_KSP_ERROR_1
+    - @ref OKL4_ERROR_KSP_ERROR_2
+    - @ref OKL4_ERROR_KSP_ERROR_3
+    - @ref OKL4_ERROR_KSP_ERROR_4
+    - @ref OKL4_ERROR_KSP_ERROR_5
+    - @ref OKL4_ERROR_KSP_ERROR_6
+    - @ref OKL4_ERROR_KSP_ERROR_7
+    - @ref OKL4_ERROR_KSP_INVALID_ARG
+    - @ref OKL4_ERROR_KSP_NOT_IMPLEMENTED
+    - @ref OKL4_ERROR_KSP_INSUFFICIENT_RIGHTS
+    - @ref OKL4_ERROR_KSP_INTERRUPT_REGISTERED
+    - @ref OKL4_ERROR_NOT_IMPLEMENTED
+    - @ref OKL4_ERROR_MAX
+*/
+
+typedef uint32_t okl4_error_t;
+
+/**
+    KSP returned OK
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_OK) */
+#define OKL4_ERROR_KSP_OK ((okl4_error_t)0x0U)
+/**
+    The operation succeeded
+*/
+/*lint -esym(621, OKL4_ERROR_OK) */
+#define OKL4_ERROR_OK ((okl4_error_t)0x0U)
+/**
+    The target vCPU was already running.
+*/
+/*lint -esym(621, OKL4_ERROR_ALREADY_STARTED) */
+#define OKL4_ERROR_ALREADY_STARTED ((okl4_error_t)0x1U)
+/**
+    The target vCPU was not running.
+*/
+/*lint -esym(621, OKL4_ERROR_ALREADY_STOPPED) */
+#define OKL4_ERROR_ALREADY_STOPPED ((okl4_error_t)0x2U)
+/*lint -esym(621, OKL4_ERROR_AXON_AREA_TOO_BIG) */
+#define OKL4_ERROR_AXON_AREA_TOO_BIG ((okl4_error_t)0x3U)
+/*lint -esym(621, OKL4_ERROR_AXON_BAD_MESSAGE_SIZE) */
+#define OKL4_ERROR_AXON_BAD_MESSAGE_SIZE ((okl4_error_t)0x4U)
+/*lint -esym(621, OKL4_ERROR_AXON_INVALID_OFFSET) */
+#define OKL4_ERROR_AXON_INVALID_OFFSET ((okl4_error_t)0x5U)
+/*lint -esym(621, OKL4_ERROR_AXON_QUEUE_NOT_MAPPED) */
+#define OKL4_ERROR_AXON_QUEUE_NOT_MAPPED ((okl4_error_t)0x6U)
+/*lint -esym(621, OKL4_ERROR_AXON_QUEUE_NOT_READY) */
+#define OKL4_ERROR_AXON_QUEUE_NOT_READY ((okl4_error_t)0x7U)
+/*lint -esym(621, OKL4_ERROR_AXON_TRANSFER_LIMIT_EXCEEDED) */
+#define OKL4_ERROR_AXON_TRANSFER_LIMIT_EXCEEDED ((okl4_error_t)0x8U)
+/**
+    A blocking operation was cancelled due to an abort of the operation.
+*/
+/*lint -esym(621, OKL4_ERROR_CANCELLED) */
+#define OKL4_ERROR_CANCELLED ((okl4_error_t)0x9U)
+/**
+    The operation failed due to an existing mapping.  Mapping
+    operations must not overlap an existing mapping.  Unmapping
+    must be performed at the same size as the original mapping.
+*/
+/*lint -esym(621, OKL4_ERROR_EXISTING_MAPPING) */
+#define OKL4_ERROR_EXISTING_MAPPING ((okl4_error_t)0xaU)
+/**
+    The operation requested with a segment failed due to
+    insufficient rights in the segment.
+*/
+/*lint -esym(621, OKL4_ERROR_INSUFFICIENT_SEGMENT_RIGHTS) */
+#define OKL4_ERROR_INSUFFICIENT_SEGMENT_RIGHTS ((okl4_error_t)0xbU)
+/**
+    The operation did not complete because it was interrupted by a
+    preemption.  This error value is only used internally.
+*/
+/*lint -esym(621, OKL4_ERROR_INTERRUPTED) */
+#define OKL4_ERROR_INTERRUPTED ((okl4_error_t)0xcU)
+/**
+    Attempt to attach an interrupt to an IRQ number, when the
+    interrupt is already attached to an IRQ number
+*/
+/*lint -esym(621, OKL4_ERROR_INTERRUPT_ALREADY_ATTACHED) */
+#define OKL4_ERROR_INTERRUPT_ALREADY_ATTACHED ((okl4_error_t)0xdU)
+/**
+    Attempt to use an IRQ number that is out of range, of
+    the wrong type, or not in the correct state
+*/
+/*lint -esym(621, OKL4_ERROR_INTERRUPT_INVALID_IRQ) */
+#define OKL4_ERROR_INTERRUPT_INVALID_IRQ ((okl4_error_t)0xeU)
+/**
+    Attempt to operate on an unknown IRQ number
+*/
+/*lint -esym(621, OKL4_ERROR_INTERRUPT_NOT_ATTACHED) */
+#define OKL4_ERROR_INTERRUPT_NOT_ATTACHED ((okl4_error_t)0xfU)
+/**
+    An invalid argument was provided.
+*/
+/*lint -esym(621, OKL4_ERROR_INVALID_ARGUMENT) */
+#define OKL4_ERROR_INVALID_ARGUMENT ((okl4_error_t)0x10U)
+/**
+    The operation failed because one of the arguments does not refer to a
+    valid object.
+*/
+/*lint -esym(621, OKL4_ERROR_INVALID_DESIGNATOR) */
+#define OKL4_ERROR_INVALID_DESIGNATOR ((okl4_error_t)0x11U)
+/**
+    The operation failed because the power_state
+    argument is invalid.
+*/
+/*lint -esym(621, OKL4_ERROR_INVALID_POWER_STATE) */
+#define OKL4_ERROR_INVALID_POWER_STATE ((okl4_error_t)0x12U)
+/**
+    The operation failed because the given segment index does
+    not correspond to an attached physical segment.
+*/
+/*lint -esym(621, OKL4_ERROR_INVALID_SEGMENT_INDEX) */
+#define OKL4_ERROR_INVALID_SEGMENT_INDEX ((okl4_error_t)0x13U)
+/**
+    A user-provided address produced a read or write fault in the operation.
+*/
+/*lint -esym(621, OKL4_ERROR_MEMORY_FAULT) */
+#define OKL4_ERROR_MEMORY_FAULT ((okl4_error_t)0x14U)
+/**
+    The operation failed because there is no mapping at the
+    specified location.
+*/
+/*lint -esym(621, OKL4_ERROR_MISSING_MAPPING) */
+#define OKL4_ERROR_MISSING_MAPPING ((okl4_error_t)0x15U)
+/**
+    The delete operation failed because the KMMU context is not
+    empty.
+*/
+/*lint -esym(621, OKL4_ERROR_NON_EMPTY_MMU_CONTEXT) */
+#define OKL4_ERROR_NON_EMPTY_MMU_CONTEXT ((okl4_error_t)0x16U)
+/**
+    The lookup operation failed because the given virtual address
+    of the given KMMU context is not mapped at the given physical
+    segment.
+*/
+/*lint -esym(621, OKL4_ERROR_NOT_IN_SEGMENT) */
+#define OKL4_ERROR_NOT_IN_SEGMENT ((okl4_error_t)0x17U)
+/**
+    The operation failed because the caller is not on the last
+    online cpu.
+*/
+/*lint -esym(621, OKL4_ERROR_NOT_LAST_CPU) */
+#define OKL4_ERROR_NOT_LAST_CPU ((okl4_error_t)0x18U)
+/**
+    Insufficient resources are available to perform the operation.
+*/
+/*lint -esym(621, OKL4_ERROR_NO_RESOURCES) */
+#define OKL4_ERROR_NO_RESOURCES ((okl4_error_t)0x19U)
+/**
+    Operation failed because pipe was not in the required state.
+*/
+/*lint -esym(621, OKL4_ERROR_PIPE_BAD_STATE) */
+#define OKL4_ERROR_PIPE_BAD_STATE ((okl4_error_t)0x1aU)
+/**
+    Operation failed because no messages are in the queue.
+*/
+/*lint -esym(621, OKL4_ERROR_PIPE_EMPTY) */
+#define OKL4_ERROR_PIPE_EMPTY ((okl4_error_t)0x1bU)
+/**
+    Operation failed because no memory is available in the queue.
+*/
+/*lint -esym(621, OKL4_ERROR_PIPE_FULL) */
+#define OKL4_ERROR_PIPE_FULL ((okl4_error_t)0x1cU)
+/**
+    Operation failed because the pipe is in reset or not ready.
+*/
+/*lint -esym(621, OKL4_ERROR_PIPE_NOT_READY) */
+#define OKL4_ERROR_PIPE_NOT_READY ((okl4_error_t)0x1dU)
+/**
+    Message was truncated because the receive buffer is too small.
+*/
+/*lint -esym(621, OKL4_ERROR_PIPE_RECV_OVERFLOW) */
+#define OKL4_ERROR_PIPE_RECV_OVERFLOW ((okl4_error_t)0x1eU)
+/**
+    The operation failed because at least one VCPU has a monitored
+    power state and is not currently suspended.
+*/
+/*lint -esym(621, OKL4_ERROR_POWER_VCPU_RESUMED) */
+#define OKL4_ERROR_POWER_VCPU_RESUMED ((okl4_error_t)0x1fU)
+/**
+    The operation requires a segment to be unused, or not attached
+    to an MMU context.
+*/
+/*lint -esym(621, OKL4_ERROR_SEGMENT_USED) */
+#define OKL4_ERROR_SEGMENT_USED ((okl4_error_t)0x20U)
+/*lint -esym(621, OKL4_ERROR_THREAD_ALREADY_WATCHING_SUSPENDED) */
+#define OKL4_ERROR_THREAD_ALREADY_WATCHING_SUSPENDED ((okl4_error_t)0x21U)
+/**
+    The timer is already active, and was not reprogrammed.
+*/
+/*lint -esym(621, OKL4_ERROR_TIMER_ACTIVE) */
+#define OKL4_ERROR_TIMER_ACTIVE ((okl4_error_t)0x22U)
+/**
+    The timer has already been cancelled or expired.
+*/
+/*lint -esym(621, OKL4_ERROR_TIMER_CANCELLED) */
+#define OKL4_ERROR_TIMER_CANCELLED ((okl4_error_t)0x23U)
+/**
+    Operation failed due to a temporary condition, and may be retried.
+*/
+/*lint -esym(621, OKL4_ERROR_TRY_AGAIN) */
+#define OKL4_ERROR_TRY_AGAIN ((okl4_error_t)0x24U)
+/**
+    The non-blocking operation failed because it would
+    block on a resource.
+*/
+/*lint -esym(621, OKL4_ERROR_WOULD_BLOCK) */
+#define OKL4_ERROR_WOULD_BLOCK ((okl4_error_t)0x25U)
+/**
+    Insufficient resources
+*/
+/*lint -esym(621, OKL4_ERROR_ALLOC_EXHAUSTED) */
+#define OKL4_ERROR_ALLOC_EXHAUSTED ((okl4_error_t)0x26U)
+/**
+    KSP specific error 0
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_ERROR_0) */
+#define OKL4_ERROR_KSP_ERROR_0 ((okl4_error_t)0x10000010U)
+/**
+    KSP specific error 1
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_ERROR_1) */
+#define OKL4_ERROR_KSP_ERROR_1 ((okl4_error_t)0x10000011U)
+/**
+    KSP specific error 2
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_ERROR_2) */
+#define OKL4_ERROR_KSP_ERROR_2 ((okl4_error_t)0x10000012U)
+/**
+    KSP specific error 3
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_ERROR_3) */
+#define OKL4_ERROR_KSP_ERROR_3 ((okl4_error_t)0x10000013U)
+/**
+    KSP specific error 4
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_ERROR_4) */
+#define OKL4_ERROR_KSP_ERROR_4 ((okl4_error_t)0x10000014U)
+/**
+    KSP specific error 5
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_ERROR_5) */
+#define OKL4_ERROR_KSP_ERROR_5 ((okl4_error_t)0x10000015U)
+/**
+    KSP specific error 6
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_ERROR_6) */
+#define OKL4_ERROR_KSP_ERROR_6 ((okl4_error_t)0x10000016U)
+/**
+    KSP specific error 7
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_ERROR_7) */
+#define OKL4_ERROR_KSP_ERROR_7 ((okl4_error_t)0x10000017U)
+/**
+    Invalid argument to KSP
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_INVALID_ARG) */
+#define OKL4_ERROR_KSP_INVALID_ARG ((okl4_error_t)0x80000001U)
+/**
+    KSP doesn't implement requested feature
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_NOT_IMPLEMENTED) */
+#define OKL4_ERROR_KSP_NOT_IMPLEMENTED ((okl4_error_t)0x80000002U)
+/**
+    User didn't supply rights for requested feature
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_INSUFFICIENT_RIGHTS) */
+#define OKL4_ERROR_KSP_INSUFFICIENT_RIGHTS ((okl4_error_t)0x80000003U)
+/**
+    Interrupt already registered
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_INTERRUPT_REGISTERED) */
+#define OKL4_ERROR_KSP_INTERRUPT_REGISTERED ((okl4_error_t)0x80000004U)
+/**
+    Requested operation is not implemented.
+*/
+/*lint -esym(621, OKL4_ERROR_NOT_IMPLEMENTED) */
+#define OKL4_ERROR_NOT_IMPLEMENTED ((okl4_error_t)0xffffffffU)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ERROR_MAX) */
+#define OKL4_ERROR_MAX ((okl4_error_t)0xffffffffU)
+
+/*lint -esym(714, okl4_error_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_error_is_element_of(okl4_error_t var);
+
+
+/*lint -esym(714, okl4_error_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_error_is_element_of(okl4_error_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((var == OKL4_ERROR_ALREADY_STARTED) ||
+            (var == OKL4_ERROR_ALREADY_STOPPED) ||
+            (var == OKL4_ERROR_AXON_AREA_TOO_BIG) ||
+            (var == OKL4_ERROR_AXON_BAD_MESSAGE_SIZE) ||
+            (var == OKL4_ERROR_AXON_INVALID_OFFSET) ||
+            (var == OKL4_ERROR_AXON_QUEUE_NOT_MAPPED) ||
+            (var == OKL4_ERROR_AXON_QUEUE_NOT_READY) ||
+            (var == OKL4_ERROR_AXON_TRANSFER_LIMIT_EXCEEDED) ||
+            (var == OKL4_ERROR_CANCELLED) ||
+            (var == OKL4_ERROR_EXISTING_MAPPING) ||
+            (var == OKL4_ERROR_INSUFFICIENT_SEGMENT_RIGHTS) ||
+            (var == OKL4_ERROR_INTERRUPTED) ||
+            (var == OKL4_ERROR_INTERRUPT_ALREADY_ATTACHED) ||
+            (var == OKL4_ERROR_INTERRUPT_INVALID_IRQ) ||
+            (var == OKL4_ERROR_INTERRUPT_NOT_ATTACHED) ||
+            (var == OKL4_ERROR_INVALID_ARGUMENT) ||
+            (var == OKL4_ERROR_INVALID_DESIGNATOR) ||
+            (var == OKL4_ERROR_INVALID_POWER_STATE) ||
+            (var == OKL4_ERROR_INVALID_SEGMENT_INDEX) ||
+            (var == OKL4_ERROR_KSP_ERROR_0) ||
+            (var == OKL4_ERROR_KSP_ERROR_1) ||
+            (var == OKL4_ERROR_KSP_ERROR_2) ||
+            (var == OKL4_ERROR_KSP_ERROR_3) ||
+            (var == OKL4_ERROR_KSP_ERROR_4) ||
+            (var == OKL4_ERROR_KSP_ERROR_5) ||
+            (var == OKL4_ERROR_KSP_ERROR_6) ||
+            (var == OKL4_ERROR_KSP_ERROR_7) ||
+            (var == OKL4_ERROR_KSP_INSUFFICIENT_RIGHTS) ||
+            (var == OKL4_ERROR_KSP_INTERRUPT_REGISTERED) ||
+            (var == OKL4_ERROR_KSP_INVALID_ARG) ||
+            (var == OKL4_ERROR_KSP_NOT_IMPLEMENTED) ||
+            (var == OKL4_ERROR_KSP_OK) ||
+            (var == OKL4_ERROR_MEMORY_FAULT) ||
+            (var == OKL4_ERROR_MISSING_MAPPING) ||
+            (var == OKL4_ERROR_NON_EMPTY_MMU_CONTEXT) ||
+            (var == OKL4_ERROR_NOT_IMPLEMENTED) ||
+            (var == OKL4_ERROR_NOT_IN_SEGMENT) ||
+            (var == OKL4_ERROR_NOT_LAST_CPU) ||
+            (var == OKL4_ERROR_NO_RESOURCES) ||
+            (var == OKL4_ERROR_OK) ||
+            (var == OKL4_ERROR_PIPE_BAD_STATE) ||
+            (var == OKL4_ERROR_PIPE_EMPTY) ||
+            (var == OKL4_ERROR_PIPE_FULL) ||
+            (var == OKL4_ERROR_PIPE_NOT_READY) ||
+            (var == OKL4_ERROR_PIPE_RECV_OVERFLOW) ||
+            (var == OKL4_ERROR_POWER_VCPU_RESUMED) ||
+            (var == OKL4_ERROR_SEGMENT_USED) ||
+            (var == OKL4_ERROR_THREAD_ALREADY_WATCHING_SUSPENDED) ||
+            (var == OKL4_ERROR_TIMER_ACTIVE) ||
+            (var == OKL4_ERROR_TIMER_CANCELLED) ||
+            (var == OKL4_ERROR_TRY_AGAIN) ||
+            (var == OKL4_ERROR_WOULD_BLOCK) ||
+            (var == OKL4_ERROR_ALLOC_EXHAUSTED));
+}
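+
+/*
+ * Illustrative sketch (uses only the constants and the predicate declared
+ * above): okl4_error_is_element_of() can validate a raw 32-bit value before
+ * it is treated as an okl4_error_t, for example when decoding a status word
+ * received over an untrusted channel.
+ *
+ *     uint32_t raw = receive_status();            // hypothetical source
+ *     okl4_error_t err = (okl4_error_t)raw;
+ *     if (!okl4_error_is_element_of(err)) {
+ *         err = OKL4_ERROR_INVALID_ARGUMENT;      // reject unknown codes
+ *     }
+ */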
+
+
+/**
+    Describes a single firmware segment: its copy (load) address, execution
+    address, file size, and the difference between memory size and file size.
+*/
+
+struct okl4_firmware_segment {
+    okl4_laddr_t copy_addr;
+    okl4_laddr_t exec_addr;
+    okl4_lsize_t filesz;
+    okl4_lsize_t memsz_diff;
+};
+
+
+
+
+/**
+    A counted, variable-length list of firmware segments.
+*/
+
+struct okl4_firmware_segments_info {
+    okl4_count_t num_segments;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+    struct okl4_firmware_segment segments[]; /*lint --e{9038} flex array */
+};
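+
+/*
+ * Illustrative sketch: okl4_firmware_segments_info ends in a flexible array
+ * member, so storage for n segments would be allocated as the size of the
+ * fixed header plus n trailing okl4_firmware_segment entries.
+ *
+ *     size_t bytes = sizeof(struct okl4_firmware_segments_info) +
+ *             (size_t)n * sizeof(struct okl4_firmware_segment);
+ */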
+
+
+
+
+/**
+    - BIT 1 -   @ref OKL4_MASK_EDGE_GICD_ICFGR
+*/
+
+/*lint -esym(621, okl4_gicd_icfgr_t) */
+typedef uint32_t okl4_gicd_icfgr_t;
+
+/*lint -esym(621, okl4_gicd_icfgr_getedge) */
+/*lint -esym(714, okl4_gicd_icfgr_getedge) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_gicd_icfgr_getedge(const okl4_gicd_icfgr_t *x);
+
+/*lint -esym(621, okl4_gicd_icfgr_setedge) */
+OKL4_FORCE_INLINE void
+okl4_gicd_icfgr_setedge(okl4_gicd_icfgr_t *x, okl4_bool_t _edge);
+
+/*lint -esym(714, okl4_gicd_icfgr_init) */
+OKL4_FORCE_INLINE void
+okl4_gicd_icfgr_init(okl4_gicd_icfgr_t *x);
+
+/*lint -esym(714, okl4_gicd_icfgr_cast) */
+OKL4_FORCE_INLINE okl4_gicd_icfgr_t
+okl4_gicd_icfgr_cast(uint32_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_GICD_ICFGR_EDGE_MASK) */
+#define OKL4_GICD_ICFGR_EDGE_MASK ((okl4_gicd_icfgr_t)1U << 1) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_EDGE_GICD_ICFGR) */
+#define OKL4_MASK_EDGE_GICD_ICFGR ((okl4_gicd_icfgr_t)1U << 1)
+/*lint -esym(621, OKL4_SHIFT_EDGE_GICD_ICFGR) */
+#define OKL4_SHIFT_EDGE_GICD_ICFGR (1)
+/*lint -esym(621, OKL4_WIDTH_EDGE_GICD_ICFGR) */
+#define OKL4_WIDTH_EDGE_GICD_ICFGR (1)
+
+
+/*lint -sem(okl4_gicd_icfgr_getedge, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_gicd_icfgr_getedge) */
+/*lint -esym(714, okl4_gicd_icfgr_getedge) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_gicd_icfgr_getedge(const okl4_gicd_icfgr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 1;
+            _Bool field : 1;
+        } bits;
+        okl4_gicd_icfgr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_gicd_icfgr_setedge, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_gicd_icfgr_setedge) */
+
+/*lint -esym(621, okl4_gicd_icfgr_setedge) */
+OKL4_FORCE_INLINE void
+okl4_gicd_icfgr_setedge(okl4_gicd_icfgr_t *x, okl4_bool_t _edge)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 1;
+            _Bool field : 1;
+        } bits;
+        okl4_gicd_icfgr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_edge;
+    *x = _conv.raw;
+}
+/*lint -esym(714, okl4_gicd_icfgr_init) */
+OKL4_FORCE_INLINE void
+okl4_gicd_icfgr_init(okl4_gicd_icfgr_t *x)
+{
+    *x = (okl4_gicd_icfgr_t)0U;
+}
+
+/*lint -esym(714, okl4_gicd_icfgr_cast) */
+OKL4_FORCE_INLINE okl4_gicd_icfgr_t
+okl4_gicd_icfgr_cast(uint32_t p, okl4_bool_t force)
+{
+    okl4_gicd_icfgr_t x = (okl4_gicd_icfgr_t)p;
+    (void)force;
+    return x;
+}
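+
+/*
+ * Illustrative sketch using only the accessors above: mark an interrupt as
+ * edge-triggered in a GICD_ICFGR image. Writing the image back to the
+ * distributor is outside this interface.
+ *
+ *     okl4_gicd_icfgr_t cfg;
+ *     okl4_gicd_icfgr_init(&cfg);
+ *     okl4_gicd_icfgr_setedge(&cfg, (okl4_bool_t)1);
+ */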
+
+
+
+
+typedef uint32_t okl4_sgi_target_t;
+
+/*lint -esym(621, OKL4_SGI_TARGET_LISTED) */
+#define OKL4_SGI_TARGET_LISTED ((okl4_sgi_target_t)0x0U)
+/*lint -esym(621, OKL4_SGI_TARGET_ALL_OTHERS) */
+#define OKL4_SGI_TARGET_ALL_OTHERS ((okl4_sgi_target_t)0x1U)
+/*lint -esym(621, OKL4_SGI_TARGET_SELF) */
+#define OKL4_SGI_TARGET_SELF ((okl4_sgi_target_t)0x2U)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_SGI_TARGET_MAX) */
+#define OKL4_SGI_TARGET_MAX ((okl4_sgi_target_t)0x2U)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_SGI_TARGET_INVALID) */
+#define OKL4_SGI_TARGET_INVALID ((okl4_sgi_target_t)0xffffffffU)
+
+/*lint -esym(714, okl4_sgi_target_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_sgi_target_is_element_of(okl4_sgi_target_t var);
+
+
+/*lint -esym(714, okl4_sgi_target_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_sgi_target_is_element_of(okl4_sgi_target_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((var == OKL4_SGI_TARGET_LISTED) ||
+            (var == OKL4_SGI_TARGET_ALL_OTHERS) ||
+            (var == OKL4_SGI_TARGET_SELF));
+}
+
+
+/**
+    - BITS 3..0 -   @ref OKL4_MASK_SGIINTID_GICD_SGIR
+    - BIT 15 -   @ref OKL4_MASK_NSATT_GICD_SGIR
+    - BITS 23..16 -   @ref OKL4_MASK_CPUTARGETLIST_GICD_SGIR
+    - BITS 25..24 -   @ref OKL4_MASK_TARGETLISTFILTER_GICD_SGIR
+*/
+
+/*lint -esym(621, okl4_gicd_sgir_t) */
+typedef uint32_t okl4_gicd_sgir_t;
+
+/*lint -esym(621, okl4_gicd_sgir_getsgiintid) */
+/*lint -esym(714, okl4_gicd_sgir_getsgiintid) */
+OKL4_FORCE_INLINE okl4_interrupt_number_t
+okl4_gicd_sgir_getsgiintid(const okl4_gicd_sgir_t *x);
+
+/*lint -esym(621, okl4_gicd_sgir_setsgiintid) */
+OKL4_FORCE_INLINE void
+okl4_gicd_sgir_setsgiintid(okl4_gicd_sgir_t *x, okl4_interrupt_number_t _sgiintid);
+
+/*lint -esym(621, okl4_gicd_sgir_getnsatt) */
+/*lint -esym(714, okl4_gicd_sgir_getnsatt) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_gicd_sgir_getnsatt(const okl4_gicd_sgir_t *x);
+
+/*lint -esym(621, okl4_gicd_sgir_setnsatt) */
+OKL4_FORCE_INLINE void
+okl4_gicd_sgir_setnsatt(okl4_gicd_sgir_t *x, okl4_bool_t _nsatt);
+
+/*lint -esym(621, okl4_gicd_sgir_getcputargetlist) */
+/*lint -esym(714, okl4_gicd_sgir_getcputargetlist) */
+OKL4_FORCE_INLINE uint8_t
+okl4_gicd_sgir_getcputargetlist(const okl4_gicd_sgir_t *x);
+
+/*lint -esym(621, okl4_gicd_sgir_setcputargetlist) */
+OKL4_FORCE_INLINE void
+okl4_gicd_sgir_setcputargetlist(okl4_gicd_sgir_t *x, uint8_t _cputargetlist);
+
+/*lint -esym(621, okl4_gicd_sgir_gettargetlistfilter) */
+/*lint -esym(714, okl4_gicd_sgir_gettargetlistfilter) */
+OKL4_FORCE_INLINE okl4_sgi_target_t
+okl4_gicd_sgir_gettargetlistfilter(const okl4_gicd_sgir_t *x);
+
+/*lint -esym(621, okl4_gicd_sgir_settargetlistfilter) */
+OKL4_FORCE_INLINE void
+okl4_gicd_sgir_settargetlistfilter(okl4_gicd_sgir_t *x, okl4_sgi_target_t _targetlistfilter);
+
+/*lint -esym(714, okl4_gicd_sgir_init) */
+OKL4_FORCE_INLINE void
+okl4_gicd_sgir_init(okl4_gicd_sgir_t *x);
+
+/*lint -esym(714, okl4_gicd_sgir_cast) */
+OKL4_FORCE_INLINE okl4_gicd_sgir_t
+okl4_gicd_sgir_cast(uint32_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_GICD_SGIR_SGIINTID_MASK) */
+#define OKL4_GICD_SGIR_SGIINTID_MASK ((okl4_gicd_sgir_t)15U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_SGIINTID_GICD_SGIR) */
+#define OKL4_MASK_SGIINTID_GICD_SGIR ((okl4_gicd_sgir_t)15U)
+/*lint -esym(621, OKL4_SHIFT_SGIINTID_GICD_SGIR) */
+#define OKL4_SHIFT_SGIINTID_GICD_SGIR (0)
+/*lint -esym(621, OKL4_WIDTH_SGIINTID_GICD_SGIR) */
+#define OKL4_WIDTH_SGIINTID_GICD_SGIR (4)
+/*lint -esym(621, OKL4_GICD_SGIR_NSATT_MASK) */
+#define OKL4_GICD_SGIR_NSATT_MASK ((okl4_gicd_sgir_t)1U << 15) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_NSATT_GICD_SGIR) */
+#define OKL4_MASK_NSATT_GICD_SGIR ((okl4_gicd_sgir_t)1U << 15)
+/*lint -esym(621, OKL4_SHIFT_NSATT_GICD_SGIR) */
+#define OKL4_SHIFT_NSATT_GICD_SGIR (15)
+/*lint -esym(621, OKL4_WIDTH_NSATT_GICD_SGIR) */
+#define OKL4_WIDTH_NSATT_GICD_SGIR (1)
+/*lint -esym(621, OKL4_GICD_SGIR_CPUTARGETLIST_MASK) */
+#define OKL4_GICD_SGIR_CPUTARGETLIST_MASK ((okl4_gicd_sgir_t)255U << 16) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_CPUTARGETLIST_GICD_SGIR) */
+#define OKL4_MASK_CPUTARGETLIST_GICD_SGIR ((okl4_gicd_sgir_t)255U << 16)
+/*lint -esym(621, OKL4_SHIFT_CPUTARGETLIST_GICD_SGIR) */
+#define OKL4_SHIFT_CPUTARGETLIST_GICD_SGIR (16)
+/*lint -esym(621, OKL4_WIDTH_CPUTARGETLIST_GICD_SGIR) */
+#define OKL4_WIDTH_CPUTARGETLIST_GICD_SGIR (8)
+/*lint -esym(621, OKL4_GICD_SGIR_TARGETLISTFILTER_MASK) */
+#define OKL4_GICD_SGIR_TARGETLISTFILTER_MASK ((okl4_gicd_sgir_t)3U << 24) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_TARGETLISTFILTER_GICD_SGIR) */
+#define OKL4_MASK_TARGETLISTFILTER_GICD_SGIR ((okl4_gicd_sgir_t)3U << 24)
+/*lint -esym(621, OKL4_SHIFT_TARGETLISTFILTER_GICD_SGIR) */
+#define OKL4_SHIFT_TARGETLISTFILTER_GICD_SGIR (24)
+/*lint -esym(621, OKL4_WIDTH_TARGETLISTFILTER_GICD_SGIR) */
+#define OKL4_WIDTH_TARGETLISTFILTER_GICD_SGIR (2)
+
+
+/*lint -sem(okl4_gicd_sgir_getsgiintid, 1p, @n >= 0 && @n <= 15) */
+/*lint -esym(621, okl4_gicd_sgir_getsgiintid) */
+/*lint -esym(714, okl4_gicd_sgir_getsgiintid) */
+OKL4_FORCE_INLINE okl4_interrupt_number_t
+okl4_gicd_sgir_getsgiintid(const okl4_gicd_sgir_t *x)
+{
+    okl4_interrupt_number_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 4;
+        } bits;
+        okl4_gicd_sgir_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_interrupt_number_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_gicd_sgir_setsgiintid, 2n >= 0 && 2n <= 15) */
+/*lint -esym(714, okl4_gicd_sgir_setsgiintid) */
+
+/*lint -esym(621, okl4_gicd_sgir_setsgiintid) */
+OKL4_FORCE_INLINE void
+okl4_gicd_sgir_setsgiintid(okl4_gicd_sgir_t *x, okl4_interrupt_number_t _sgiintid)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 4;
+        } bits;
+        okl4_gicd_sgir_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_sgiintid;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_gicd_sgir_getnsatt, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_gicd_sgir_getnsatt) */
+/*lint -esym(714, okl4_gicd_sgir_getnsatt) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_gicd_sgir_getnsatt(const okl4_gicd_sgir_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 15;
+            _Bool field : 1;
+        } bits;
+        okl4_gicd_sgir_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_gicd_sgir_setnsatt, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_gicd_sgir_setnsatt) */
+
+/*lint -esym(621, okl4_gicd_sgir_setnsatt) */
+OKL4_FORCE_INLINE void
+okl4_gicd_sgir_setnsatt(okl4_gicd_sgir_t *x, okl4_bool_t _nsatt)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 15;
+            _Bool field : 1;
+        } bits;
+        okl4_gicd_sgir_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_nsatt;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_gicd_sgir_getcputargetlist, 1p, @n >= 0 && @n <= 255) */
+/*lint -esym(621, okl4_gicd_sgir_getcputargetlist) */
+/*lint -esym(714, okl4_gicd_sgir_getcputargetlist) */
+OKL4_FORCE_INLINE uint8_t
+okl4_gicd_sgir_getcputargetlist(const okl4_gicd_sgir_t *x)
+{
+    uint8_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 16;
+            uint32_t field : 8;
+        } bits;
+        okl4_gicd_sgir_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint8_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_gicd_sgir_setcputargetlist, 2n >= 0 && 2n <= 255) */
+/*lint -esym(714, okl4_gicd_sgir_setcputargetlist) */
+
+/*lint -esym(621, okl4_gicd_sgir_setcputargetlist) */
+OKL4_FORCE_INLINE void
+okl4_gicd_sgir_setcputargetlist(okl4_gicd_sgir_t *x, uint8_t _cputargetlist)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 16;
+            uint32_t field : 8;
+        } bits;
+        okl4_gicd_sgir_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_cputargetlist;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_gicd_sgir_gettargetlistfilter, 1p, @n >= 0 && @n <= 3) */
+/*lint -esym(621, okl4_gicd_sgir_gettargetlistfilter) */
+/*lint -esym(714, okl4_gicd_sgir_gettargetlistfilter) */
+OKL4_FORCE_INLINE okl4_sgi_target_t
+okl4_gicd_sgir_gettargetlistfilter(const okl4_gicd_sgir_t *x)
+{
+    okl4_sgi_target_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 24;
+            uint32_t field : 2;
+        } bits;
+        okl4_gicd_sgir_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_sgi_target_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_gicd_sgir_settargetlistfilter, 2n >= 0 && 2n <= 3) */
+/*lint -esym(714, okl4_gicd_sgir_settargetlistfilter) */
+
+/*lint -esym(621, okl4_gicd_sgir_settargetlistfilter) */
+OKL4_FORCE_INLINE void
+okl4_gicd_sgir_settargetlistfilter(okl4_gicd_sgir_t *x, okl4_sgi_target_t _targetlistfilter)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 24;
+            uint32_t field : 2;
+        } bits;
+        okl4_gicd_sgir_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_targetlistfilter;
+    *x = _conv.raw;
+}
+/*lint -esym(714, okl4_gicd_sgir_init) */
+OKL4_FORCE_INLINE void
+okl4_gicd_sgir_init(okl4_gicd_sgir_t *x)
+{
+    *x = (okl4_gicd_sgir_t)32768U;
+}
+
+/*lint -esym(714, okl4_gicd_sgir_cast) */
+OKL4_FORCE_INLINE okl4_gicd_sgir_t
+okl4_gicd_sgir_cast(uint32_t p, okl4_bool_t force)
+{
+    okl4_gicd_sgir_t x = (okl4_gicd_sgir_t)p;
+    (void)force;
+    return x;
+}
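+
+/*
+ * Illustrative sketch: a GICD_SGIR value can be built field by field with the
+ * accessors above. This example encodes SGI 5 aimed at the CPUs named in a
+ * target list mask; the actual register write is not shown.
+ *
+ *     okl4_gicd_sgir_t sgir;
+ *     okl4_gicd_sgir_init(&sgir);
+ *     okl4_gicd_sgir_setsgiintid(&sgir, 5U);
+ *     okl4_gicd_sgir_setcputargetlist(&sgir, 0x3U);   // CPUs 0 and 1
+ *     okl4_gicd_sgir_settargetlistfilter(&sgir, OKL4_SGI_TARGET_LISTED);
+ */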
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+/**
+    The okl4_kmmu_t structure is used to represent a kernel MMU
+    context.
+*/
+
+struct okl4_kmmu {
+    okl4_kcap_t kcap;
+};
+
+
+
+
+/**
+    The `okl4_ksp_arg_t` type represents an unsigned, machine-native
+    register-sized integer value used for KSP call arguments. Important: it is
+    truncated to the guest register size when the guest register size is
+    smaller than the kernel register size.
+*/
+
+typedef okl4_register_t okl4_ksp_arg_t;
+
+
+
+
+/**
+    The capability and virtual interrupt number used to communicate with a
+    KSP user agent.
+*/
+
+struct okl4_ksp_user_agent {
+    okl4_kcap_t kcap;
+    okl4_interrupt_number_t virq;
+};
+
+
+
+
+
+typedef uint32_t okl4_ksp_vdevice_class_t;
+
+
+
+
+
+typedef okl4_register_t okl4_laddr_pn_t;
+
+
+
+
+
+typedef okl4_register_t okl4_laddr_tr_t;
+
+
+
+
+/**
+    The capability and virtual interrupt for one direction of a pipe.
+*/
+
+struct okl4_pipe_data {
+    okl4_kcap_t kcap;
+    okl4_irq_t virq;
+};
+
+
+
+
+/**
+    The receive and transmit pipe data that together form a pipe endpoint.
+*/
+
+struct okl4_pipe_ep_data {
+    struct okl4_pipe_data rx;
+    struct okl4_pipe_data tx;
+};
+
+
+
+
+
+typedef uint32_t okl4_link_role_t;
+
+/*lint -esym(621, OKL4_LINK_ROLE_SYMMETRIC) */
+#define OKL4_LINK_ROLE_SYMMETRIC ((okl4_link_role_t)0x0U)
+/*lint -esym(621, OKL4_LINK_ROLE_SERVER) */
+#define OKL4_LINK_ROLE_SERVER ((okl4_link_role_t)0x1U)
+/*lint -esym(621, OKL4_LINK_ROLE_CLIENT) */
+#define OKL4_LINK_ROLE_CLIENT ((okl4_link_role_t)0x2U)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_LINK_ROLE_MAX) */
+#define OKL4_LINK_ROLE_MAX ((okl4_link_role_t)0x2U)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_LINK_ROLE_INVALID) */
+#define OKL4_LINK_ROLE_INVALID ((okl4_link_role_t)0xffffffffU)
+
+/*lint -esym(714, okl4_link_role_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_link_role_is_element_of(okl4_link_role_t var);
+
+
+/*lint -esym(714, okl4_link_role_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_link_role_is_element_of(okl4_link_role_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((var == OKL4_LINK_ROLE_SYMMETRIC) ||
+            (var == OKL4_LINK_ROLE_SERVER) ||
+            (var == OKL4_LINK_ROLE_CLIENT));
+}
+
+
+
+typedef uint32_t okl4_link_transport_type_t;
+
+/*lint -esym(621, OKL4_LINK_TRANSPORT_TYPE_SHARED_BUFFER) */
+#define OKL4_LINK_TRANSPORT_TYPE_SHARED_BUFFER ((okl4_link_transport_type_t)0x0U)
+/*lint -esym(621, OKL4_LINK_TRANSPORT_TYPE_AXONS) */
+#define OKL4_LINK_TRANSPORT_TYPE_AXONS ((okl4_link_transport_type_t)0x1U)
+/*lint -esym(621, OKL4_LINK_TRANSPORT_TYPE_PIPES) */
+#define OKL4_LINK_TRANSPORT_TYPE_PIPES ((okl4_link_transport_type_t)0x2U)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_LINK_TRANSPORT_TYPE_MAX) */
+#define OKL4_LINK_TRANSPORT_TYPE_MAX ((okl4_link_transport_type_t)0x2U)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_LINK_TRANSPORT_TYPE_INVALID) */
+#define OKL4_LINK_TRANSPORT_TYPE_INVALID ((okl4_link_transport_type_t)0xffffffffU)
+
+/*lint -esym(714, okl4_link_transport_type_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_link_transport_type_is_element_of(okl4_link_transport_type_t var);
+
+
+/*lint -esym(714, okl4_link_transport_type_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_link_transport_type_is_element_of(okl4_link_transport_type_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((var == OKL4_LINK_TRANSPORT_TYPE_SHARED_BUFFER) ||
+            (var == OKL4_LINK_TRANSPORT_TYPE_AXONS) ||
+            (var == OKL4_LINK_TRANSPORT_TYPE_PIPES));
+}
+
+
+/**
+    Describes a communication link: its name, partner name, role, and
+    transport-specific configuration.
+*/
+
+struct okl4_link {
+    __ptr64(okl4_string_t, name);
+    __ptr64(void *, opaque);
+    __ptr64(okl4_string_t, partner_name);
+    okl4_link_role_t role;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+    union {
+        struct {
+            struct okl4_virtmem_item buffer;
+            okl4_irq_t virq_in;
+            okl4_kcap_t virq_out;
+        } shared_buffer;
+
+        struct {
+            struct okl4_axon_ep_data axon_ep;
+            okl4_ksize_t message_size;
+            okl4_count_t queue_length;
+            _okl4_padding_t __padding0_4; /**< Padding 8 */
+            _okl4_padding_t __padding1_5; /**< Padding 8 */
+            _okl4_padding_t __padding2_6; /**< Padding 8 */
+            _okl4_padding_t __padding3_7; /**< Padding 8 */
+        } axons;
+
+        struct {
+            okl4_ksize_t message_size;
+            struct okl4_pipe_ep_data pipe_ep;
+            okl4_count_t queue_length;
+            _okl4_padding_t __padding0_4; /**< Padding 8 */
+            _okl4_padding_t __padding1_5; /**< Padding 8 */
+            _okl4_padding_t __padding2_6; /**< Padding 8 */
+            _okl4_padding_t __padding3_7; /**< Padding 8 */
+        } pipes;
+
+    } transport;
+
+    okl4_link_transport_type_t transport_type;
+    _okl4_padding_t __padding4_4;
+    _okl4_padding_t __padding5_5;
+    _okl4_padding_t __padding6_6;
+    _okl4_padding_t __padding7_7;
+};
+
+
+
+
+/**
+    A counted, variable-length list of pointers to links.
+*/
+
+struct okl4_links {
+    okl4_count_t num_links;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+    __ptr64_array(struct okl4_link *, links)[]; /*lint --e{9038} flex array */
+};
+
+
+
+
+
+typedef okl4_register_t okl4_lsize_pn_t;
+
+
+
+
+
+typedef okl4_register_t okl4_lsize_tr_t;
+
+
+
+
+/**
+    The okl4_machine_info_t structure holds machine-specific
+    constants that are only known at weave-time. Objects of this
+    type are typically obtained from the OKL4 environment.
+*/
+
+struct okl4_machine_info {
+    okl4_ksize_t l1_cache_line_size;
+    okl4_ksize_t l2_cache_line_size;
+    okl4_count_t num_cpus;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+};
+
+
+
+
+/**
+    Describes a merged physical pool: its base physical address and a counted
+    list of virtual memory segments.
+*/
+
+struct okl4_merged_physpool {
+    okl4_paddr_t phys_addr;
+    okl4_count_t num_segments;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+    struct okl4_virtmem_item segments[]; /*lint --e{9038} flex array */
+};
+
+
+
+
+
+typedef uint32_t okl4_microseconds_t;
+
+
+
+
+/**
+    The capability and virtual interrupt for a Microvisor timer.
+*/
+
+struct okl4_microvisor_timer {
+    okl4_kcap_t kcap;
+    okl4_irq_t virq;
+};
+
+
+
+
+/**
+    - BITS 15..0 -   @ref OKL4_MASK_ERROR_MMU_LOOKUP_INDEX
+    - BITS 31..16 -   @ref OKL4_MASK_INDEX_MMU_LOOKUP_INDEX
+*/
+
+/*lint -esym(621, okl4_mmu_lookup_index_t) */
+typedef uint32_t okl4_mmu_lookup_index_t;
+
+/*lint -esym(621, okl4_mmu_lookup_index_geterror) */
+/*lint -esym(714, okl4_mmu_lookup_index_geterror) */
+OKL4_FORCE_INLINE okl4_error_t
+okl4_mmu_lookup_index_geterror(const okl4_mmu_lookup_index_t *x);
+
+/*lint -esym(621, okl4_mmu_lookup_index_seterror) */
+OKL4_FORCE_INLINE void
+okl4_mmu_lookup_index_seterror(okl4_mmu_lookup_index_t *x, okl4_error_t _error);
+
+/*lint -esym(621, okl4_mmu_lookup_index_getindex) */
+/*lint -esym(714, okl4_mmu_lookup_index_getindex) */
+OKL4_FORCE_INLINE okl4_count_t
+okl4_mmu_lookup_index_getindex(const okl4_mmu_lookup_index_t *x);
+
+/*lint -esym(621, okl4_mmu_lookup_index_setindex) */
+OKL4_FORCE_INLINE void
+okl4_mmu_lookup_index_setindex(okl4_mmu_lookup_index_t *x, okl4_count_t _index);
+
+/*lint -esym(714, okl4_mmu_lookup_index_init) */
+OKL4_FORCE_INLINE void
+okl4_mmu_lookup_index_init(okl4_mmu_lookup_index_t *x);
+
+/*lint -esym(714, okl4_mmu_lookup_index_cast) */
+OKL4_FORCE_INLINE okl4_mmu_lookup_index_t
+okl4_mmu_lookup_index_cast(uint32_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_MMU_LOOKUP_INDEX_ERROR_MASK) */
+#define OKL4_MMU_LOOKUP_INDEX_ERROR_MASK ((okl4_mmu_lookup_index_t)65535U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_ERROR_MMU_LOOKUP_INDEX) */
+#define OKL4_MASK_ERROR_MMU_LOOKUP_INDEX ((okl4_mmu_lookup_index_t)65535U)
+/*lint -esym(621, OKL4_SHIFT_ERROR_MMU_LOOKUP_INDEX) */
+#define OKL4_SHIFT_ERROR_MMU_LOOKUP_INDEX (0)
+/*lint -esym(621, OKL4_WIDTH_ERROR_MMU_LOOKUP_INDEX) */
+#define OKL4_WIDTH_ERROR_MMU_LOOKUP_INDEX (16)
+/*lint -esym(621, OKL4_MMU_LOOKUP_INDEX_INDEX_MASK) */
+#define OKL4_MMU_LOOKUP_INDEX_INDEX_MASK ((okl4_mmu_lookup_index_t)65535U << 16) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_INDEX_MMU_LOOKUP_INDEX) */
+#define OKL4_MASK_INDEX_MMU_LOOKUP_INDEX ((okl4_mmu_lookup_index_t)65535U << 16)
+/*lint -esym(621, OKL4_SHIFT_INDEX_MMU_LOOKUP_INDEX) */
+#define OKL4_SHIFT_INDEX_MMU_LOOKUP_INDEX (16)
+/*lint -esym(621, OKL4_WIDTH_INDEX_MMU_LOOKUP_INDEX) */
+#define OKL4_WIDTH_INDEX_MMU_LOOKUP_INDEX (16)
+
+
+/*lint -sem(okl4_mmu_lookup_index_geterror, 1p, @n >= 0 && @n <= 65535) */
+/*lint -esym(621, okl4_mmu_lookup_index_geterror) */
+/*lint -esym(714, okl4_mmu_lookup_index_geterror) */
+OKL4_FORCE_INLINE okl4_error_t
+okl4_mmu_lookup_index_geterror(const okl4_mmu_lookup_index_t *x)
+{
+    okl4_error_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 16;
+        } bits;
+        okl4_mmu_lookup_index_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_error_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_mmu_lookup_index_seterror, 2n >= 0 && 2n <= 65535) */
+/*lint -esym(714, okl4_mmu_lookup_index_seterror) */
+
+/*lint -esym(621, okl4_mmu_lookup_index_seterror) */
+OKL4_FORCE_INLINE void
+okl4_mmu_lookup_index_seterror(okl4_mmu_lookup_index_t *x, okl4_error_t _error)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 16;
+        } bits;
+        okl4_mmu_lookup_index_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_error;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_mmu_lookup_index_getindex, 1p, @n >= 0 && @n <= 65535) */
+/*lint -esym(621, okl4_mmu_lookup_index_getindex) */
+/*lint -esym(714, okl4_mmu_lookup_index_getindex) */
+OKL4_FORCE_INLINE okl4_count_t
+okl4_mmu_lookup_index_getindex(const okl4_mmu_lookup_index_t *x)
+{
+    okl4_count_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 16;
+            uint32_t field : 16;
+        } bits;
+        okl4_mmu_lookup_index_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_count_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_mmu_lookup_index_setindex, 2n >= 0 && 2n <= 65535) */
+/*lint -esym(714, okl4_mmu_lookup_index_setindex) */
+
+/*lint -esym(621, okl4_mmu_lookup_index_setindex) */
+OKL4_FORCE_INLINE void
+okl4_mmu_lookup_index_setindex(okl4_mmu_lookup_index_t *x, okl4_count_t _index)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 16;
+            uint32_t field : 16;
+        } bits;
+        okl4_mmu_lookup_index_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_index;
+    *x = _conv.raw;
+}
+/*lint -esym(714, okl4_mmu_lookup_index_init) */
+OKL4_FORCE_INLINE void
+okl4_mmu_lookup_index_init(okl4_mmu_lookup_index_t *x)
+{
+    *x = (okl4_mmu_lookup_index_t)0U;
+}
+
+/*lint -esym(714, okl4_mmu_lookup_index_cast) */
+OKL4_FORCE_INLINE okl4_mmu_lookup_index_t
+okl4_mmu_lookup_index_cast(uint32_t p, okl4_bool_t force)
+{
+    okl4_mmu_lookup_index_t x = (okl4_mmu_lookup_index_t)p;
+    (void)force;
+    return x;
+}
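+
+/*
+ * Illustrative sketch: an okl4_mmu_lookup_index_t packs an error code in its
+ * low 16 bits and a segment index in its high 16 bits, so a lookup result
+ * would typically be checked and then decoded with the accessors above.
+ *
+ *     okl4_mmu_lookup_index_t result;             // returned by a lookup
+ *     if (okl4_mmu_lookup_index_geterror(&result) == OKL4_ERROR_OK) {
+ *         okl4_count_t index = okl4_mmu_lookup_index_getindex(&result);
+ *         use_segment_index(index);               // hypothetical consumer
+ *     }
+ */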
+
+
+
+/**
+    - BITS 9..0 -   @ref OKL4_MASK_SEG_INDEX_MMU_LOOKUP_SIZE
+    - BITS 63..10 -   @ref OKL4_MASK_SIZE_10_MMU_LOOKUP_SIZE
+*/
+
+/*lint -esym(621, okl4_mmu_lookup_size_t) */
+typedef okl4_register_t okl4_mmu_lookup_size_t;
+
+/*lint -esym(621, okl4_mmu_lookup_size_getsegindex) */
+/*lint -esym(714, okl4_mmu_lookup_size_getsegindex) */
+OKL4_FORCE_INLINE okl4_count_t
+okl4_mmu_lookup_size_getsegindex(const okl4_mmu_lookup_size_t *x);
+
+/*lint -esym(621, okl4_mmu_lookup_size_setsegindex) */
+OKL4_FORCE_INLINE void
+okl4_mmu_lookup_size_setsegindex(okl4_mmu_lookup_size_t *x, okl4_count_t _seg_index);
+
+/*lint -esym(621, okl4_mmu_lookup_size_getsize10) */
+/*lint -esym(714, okl4_mmu_lookup_size_getsize10) */
+OKL4_FORCE_INLINE okl4_register_t
+okl4_mmu_lookup_size_getsize10(const okl4_mmu_lookup_size_t *x);
+
+/*lint -esym(621, okl4_mmu_lookup_size_setsize10) */
+OKL4_FORCE_INLINE void
+okl4_mmu_lookup_size_setsize10(okl4_mmu_lookup_size_t *x, okl4_register_t _size_10);
+
+/*lint -esym(714, okl4_mmu_lookup_size_init) */
+OKL4_FORCE_INLINE void
+okl4_mmu_lookup_size_init(okl4_mmu_lookup_size_t *x);
+
+/*lint -esym(714, okl4_mmu_lookup_size_cast) */
+OKL4_FORCE_INLINE okl4_mmu_lookup_size_t
+okl4_mmu_lookup_size_cast(uint64_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_MMU_LOOKUP_SIZE_SEG_INDEX_MASK) */
+#define OKL4_MMU_LOOKUP_SIZE_SEG_INDEX_MASK ((okl4_mmu_lookup_size_t)1023U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_SEG_INDEX_MMU_LOOKUP_SIZE) */
+#define OKL4_MASK_SEG_INDEX_MMU_LOOKUP_SIZE ((okl4_mmu_lookup_size_t)1023U)
+/*lint -esym(621, OKL4_SHIFT_SEG_INDEX_MMU_LOOKUP_SIZE) */
+#define OKL4_SHIFT_SEG_INDEX_MMU_LOOKUP_SIZE (0)
+/*lint -esym(621, OKL4_WIDTH_SEG_INDEX_MMU_LOOKUP_SIZE) */
+#define OKL4_WIDTH_SEG_INDEX_MMU_LOOKUP_SIZE (10)
+/*lint -esym(621, OKL4_MMU_LOOKUP_SIZE_SIZE_10_MASK) */
+#define OKL4_MMU_LOOKUP_SIZE_SIZE_10_MASK ((okl4_mmu_lookup_size_t)18014398509481983U << 10) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_SIZE_10_MMU_LOOKUP_SIZE) */
+#define OKL4_MASK_SIZE_10_MMU_LOOKUP_SIZE ((okl4_mmu_lookup_size_t)18014398509481983U << 10)
+/*lint -esym(621, OKL4_SHIFT_SIZE_10_MMU_LOOKUP_SIZE) */
+#define OKL4_SHIFT_SIZE_10_MMU_LOOKUP_SIZE (10)
+/*lint -esym(621, OKL4_WIDTH_SIZE_10_MMU_LOOKUP_SIZE) */
+#define OKL4_WIDTH_SIZE_10_MMU_LOOKUP_SIZE (54)
+
+
+/*lint -sem(okl4_mmu_lookup_size_getsegindex, 1p, @n >= 0 && @n <= 1023) */
+/*lint -esym(621, okl4_mmu_lookup_size_getsegindex) */
+/*lint -esym(714, okl4_mmu_lookup_size_getsegindex) */
+OKL4_FORCE_INLINE okl4_count_t
+okl4_mmu_lookup_size_getsegindex(const okl4_mmu_lookup_size_t *x)
+{
+    okl4_count_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t field : 10;
+        } bits;
+        okl4_mmu_lookup_size_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_count_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_mmu_lookup_size_setsegindex, 2n >= 0 && 2n <= 1023) */
+/*lint -esym(714, okl4_mmu_lookup_size_setsegindex) */
+
+/*lint -esym(621, okl4_mmu_lookup_size_setsegindex) */
+OKL4_FORCE_INLINE void
+okl4_mmu_lookup_size_setsegindex(okl4_mmu_lookup_size_t *x, okl4_count_t _seg_index)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t field : 10;
+        } bits;
+        okl4_mmu_lookup_size_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint64_t)_seg_index;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_mmu_lookup_size_getsize10, 1p, @n >= 0 && @n <= 18014398509481983) */
+/*lint -esym(621, okl4_mmu_lookup_size_getsize10) */
+/*lint -esym(714, okl4_mmu_lookup_size_getsize10) */
+OKL4_FORCE_INLINE okl4_register_t
+okl4_mmu_lookup_size_getsize10(const okl4_mmu_lookup_size_t *x)
+{
+    okl4_register_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 10;
+            uint64_t field : 54;
+        } bits;
+        okl4_mmu_lookup_size_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_register_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_mmu_lookup_size_setsize10, 2n >= 0 && 2n <= 18014398509481983) */
+/*lint -esym(714, okl4_mmu_lookup_size_setsize10) */
+
+/*lint -esym(621, okl4_mmu_lookup_size_setsize10) */
+OKL4_FORCE_INLINE void
+okl4_mmu_lookup_size_setsize10(okl4_mmu_lookup_size_t *x, okl4_register_t _size_10)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 10;
+            uint64_t field : 54;
+        } bits;
+        okl4_mmu_lookup_size_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint64_t)_size_10;
+    *x = _conv.raw;
+}
+/*lint -esym(714, okl4_mmu_lookup_size_init) */
+OKL4_FORCE_INLINE void
+okl4_mmu_lookup_size_init(okl4_mmu_lookup_size_t *x)
+{
+    *x = (okl4_mmu_lookup_size_t)0U;
+}
+
+/*lint -esym(714, okl4_mmu_lookup_size_cast) */
+OKL4_FORCE_INLINE okl4_mmu_lookup_size_t
+okl4_mmu_lookup_size_cast(uint64_t p, okl4_bool_t force)
+{
+    okl4_mmu_lookup_size_t x = (okl4_mmu_lookup_size_t)p;
+    (void)force;
+    return x;
+}
+
+
+
+
+typedef uint64_t okl4_nanoseconds_t;
+
+/** Timer period upper bound is (1 << 55) ns */
+/*lint -esym(621, OKL4_TIMER_MAX_PERIOD_NS) */
+#define OKL4_TIMER_MAX_PERIOD_NS ((okl4_nanoseconds_t)(36028797018963968U))
+
+/** Timer period lower bound is 1000000 ns */
+/*lint -esym(621, OKL4_TIMER_MIN_PERIOD_NS) */
+#define OKL4_TIMER_MIN_PERIOD_NS ((okl4_nanoseconds_t)(1000000U))
+
+
+
+/**
+    - BITS 2..0 -   @ref _OKL4_MASK_RWX_PAGE_ATTRIBUTE
+    - BITS 31..4 -   @ref _OKL4_MASK_ATTRIB_PAGE_ATTRIBUTE
+*/
+
+/*lint -esym(621, _okl4_page_attribute_t) */
+typedef uint32_t _okl4_page_attribute_t;
+
+/*lint -esym(621, _okl4_page_attribute_getrwx) */
+/*lint -esym(714, _okl4_page_attribute_getrwx) */
+OKL4_FORCE_INLINE okl4_page_perms_t
+_okl4_page_attribute_getrwx(const _okl4_page_attribute_t *x);
+
+/*lint -esym(621, _okl4_page_attribute_setrwx) */
+OKL4_FORCE_INLINE void
+_okl4_page_attribute_setrwx(_okl4_page_attribute_t *x, okl4_page_perms_t _rwx);
+
+/*lint -esym(621, _okl4_page_attribute_getattrib) */
+/*lint -esym(714, _okl4_page_attribute_getattrib) */
+OKL4_FORCE_INLINE okl4_page_cache_t
+_okl4_page_attribute_getattrib(const _okl4_page_attribute_t *x);
+
+/*lint -esym(621, _okl4_page_attribute_setattrib) */
+OKL4_FORCE_INLINE void
+_okl4_page_attribute_setattrib(_okl4_page_attribute_t *x, okl4_page_cache_t _attrib);
+
+/*lint -esym(714, _okl4_page_attribute_init) */
+OKL4_FORCE_INLINE void
+_okl4_page_attribute_init(_okl4_page_attribute_t *x);
+
+/*lint -esym(714, _okl4_page_attribute_cast) */
+OKL4_FORCE_INLINE _okl4_page_attribute_t
+_okl4_page_attribute_cast(uint32_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, _OKL4_PAGE_ATTRIBUTE_RWX_MASK) */
+#define _OKL4_PAGE_ATTRIBUTE_RWX_MASK ((_okl4_page_attribute_t)7U) /* Deprecated */
+/*lint -esym(621, _OKL4_MASK_RWX_PAGE_ATTRIBUTE) */
+#define _OKL4_MASK_RWX_PAGE_ATTRIBUTE ((_okl4_page_attribute_t)7U)
+/*lint -esym(621, _OKL4_SHIFT_RWX_PAGE_ATTRIBUTE) */
+#define _OKL4_SHIFT_RWX_PAGE_ATTRIBUTE (0)
+/*lint -esym(621, _OKL4_WIDTH_RWX_PAGE_ATTRIBUTE) */
+#define _OKL4_WIDTH_RWX_PAGE_ATTRIBUTE (3)
+/*lint -esym(621, _OKL4_PAGE_ATTRIBUTE_ATTRIB_MASK) */
+#define _OKL4_PAGE_ATTRIBUTE_ATTRIB_MASK ((_okl4_page_attribute_t)268435455U << 4) /* Deprecated */
+/*lint -esym(621, _OKL4_MASK_ATTRIB_PAGE_ATTRIBUTE) */
+#define _OKL4_MASK_ATTRIB_PAGE_ATTRIBUTE ((_okl4_page_attribute_t)268435455U << 4)
+/*lint -esym(621, _OKL4_SHIFT_ATTRIB_PAGE_ATTRIBUTE) */
+#define _OKL4_SHIFT_ATTRIB_PAGE_ATTRIBUTE (4)
+/*lint -esym(621, _OKL4_WIDTH_ATTRIB_PAGE_ATTRIBUTE) */
+#define _OKL4_WIDTH_ATTRIB_PAGE_ATTRIBUTE (28)
+
+
+/*lint -sem(_okl4_page_attribute_getrwx, 1p, @n >= 0 && @n <= 7) */
+/*lint -esym(621, _okl4_page_attribute_getrwx) */
+/*lint -esym(714, _okl4_page_attribute_getrwx) */
+OKL4_FORCE_INLINE okl4_page_perms_t
+_okl4_page_attribute_getrwx(const _okl4_page_attribute_t *x)
+{
+    okl4_page_perms_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 3;
+        } bits;
+        _okl4_page_attribute_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_page_perms_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(_okl4_page_attribute_setrwx, 2n >= 0 && 2n <= 7) */
+/*lint -esym(714, _okl4_page_attribute_setrwx) */
+
+/*lint -esym(621, _okl4_page_attribute_setrwx) */
+OKL4_FORCE_INLINE void
+_okl4_page_attribute_setrwx(_okl4_page_attribute_t *x, okl4_page_perms_t _rwx)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 3;
+        } bits;
+        _okl4_page_attribute_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_rwx;
+    *x = _conv.raw;
+}
+/*lint -sem(_okl4_page_attribute_getattrib, 1p, @n >= 0 && @n <= 268435455) */
+/*lint -esym(621, _okl4_page_attribute_getattrib) */
+/*lint -esym(714, _okl4_page_attribute_getattrib) */
+OKL4_FORCE_INLINE okl4_page_cache_t
+_okl4_page_attribute_getattrib(const _okl4_page_attribute_t *x)
+{
+    okl4_page_cache_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 4;
+            uint32_t field : 28;
+        } bits;
+        _okl4_page_attribute_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_page_cache_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(_okl4_page_attribute_setattrib, 2n >= 0 && 2n <= 268435455) */
+/*lint -esym(714, _okl4_page_attribute_setattrib) */
+
+/*lint -esym(621, _okl4_page_attribute_setattrib) */
+OKL4_FORCE_INLINE void
+_okl4_page_attribute_setattrib(_okl4_page_attribute_t *x, okl4_page_cache_t _attrib)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 4;
+            uint32_t field : 28;
+        } bits;
+        _okl4_page_attribute_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_attrib;
+    *x = _conv.raw;
+}
+/*lint -esym(714, _okl4_page_attribute_init) */
+OKL4_FORCE_INLINE void
+_okl4_page_attribute_init(_okl4_page_attribute_t *x)
+{
+    *x = (_okl4_page_attribute_t)0U;
+}
+
+/*lint -esym(714, _okl4_page_attribute_cast) */
+OKL4_FORCE_INLINE _okl4_page_attribute_t
+_okl4_page_attribute_cast(uint32_t p, okl4_bool_t force)
+{
+    _okl4_page_attribute_t x = (_okl4_page_attribute_t)p;
+    (void)force;
+    return x;
+}
+
+
+
+/**
+    - BIT 0 -   @ref OKL4_MASK_DO_OP_PIPE_CONTROL
+    - BITS 3..1 -   @ref OKL4_MASK_OPERATION_PIPE_CONTROL
+*/
+
+/*lint -esym(621, okl4_pipe_control_t) */
+typedef uint8_t okl4_pipe_control_t;
+
+/*lint -esym(621, okl4_pipe_control_getdoop) */
+/*lint -esym(714, okl4_pipe_control_getdoop) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_control_getdoop(const okl4_pipe_control_t *x);
+
+/*lint -esym(621, okl4_pipe_control_setdoop) */
+OKL4_FORCE_INLINE void
+okl4_pipe_control_setdoop(okl4_pipe_control_t *x, okl4_bool_t _do_op);
+
+/*lint -esym(621, okl4_pipe_control_getoperation) */
+/*lint -esym(714, okl4_pipe_control_getoperation) */
+OKL4_FORCE_INLINE uint8_t
+okl4_pipe_control_getoperation(const okl4_pipe_control_t *x);
+
+/*lint -esym(621, okl4_pipe_control_setoperation) */
+OKL4_FORCE_INLINE void
+okl4_pipe_control_setoperation(okl4_pipe_control_t *x, uint8_t _operation);
+
+/*lint -esym(714, okl4_pipe_control_init) */
+OKL4_FORCE_INLINE void
+okl4_pipe_control_init(okl4_pipe_control_t *x);
+
+/*lint -esym(714, okl4_pipe_control_cast) */
+OKL4_FORCE_INLINE okl4_pipe_control_t
+okl4_pipe_control_cast(uint8_t p, okl4_bool_t force);
+
+
+
+/*lint -esym(621, OKL4_PIPE_CONTROL_OP_CLR_HALTED) */
+#define OKL4_PIPE_CONTROL_OP_CLR_HALTED ((okl4_pipe_control_t)(4U))
+/*lint -esym(621, OKL4_PIPE_CONTROL_OP_RESET) */
+#define OKL4_PIPE_CONTROL_OP_RESET ((okl4_pipe_control_t)(0U))
+/*lint -esym(621, OKL4_PIPE_CONTROL_OP_SET_HALTED) */
+#define OKL4_PIPE_CONTROL_OP_SET_HALTED ((okl4_pipe_control_t)(3U))
+/*lint -esym(621, OKL4_PIPE_CONTROL_OP_SET_RX_READY) */
+#define OKL4_PIPE_CONTROL_OP_SET_RX_READY ((okl4_pipe_control_t)(2U))
+/*lint -esym(621, OKL4_PIPE_CONTROL_OP_SET_TX_READY) */
+#define OKL4_PIPE_CONTROL_OP_SET_TX_READY ((okl4_pipe_control_t)(1U))
+
+/*lint -esym(621, OKL4_PIPE_CONTROL_DO_OP_MASK) */
+#define OKL4_PIPE_CONTROL_DO_OP_MASK (okl4_pipe_control_t)(1U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_DO_OP_PIPE_CONTROL) */
+#define OKL4_MASK_DO_OP_PIPE_CONTROL (okl4_pipe_control_t)(1U)
+/*lint -esym(621, OKL4_SHIFT_DO_OP_PIPE_CONTROL) */
+#define OKL4_SHIFT_DO_OP_PIPE_CONTROL (0)
+/*lint -esym(621, OKL4_WIDTH_DO_OP_PIPE_CONTROL) */
+#define OKL4_WIDTH_DO_OP_PIPE_CONTROL (1)
+/*lint -esym(621, OKL4_PIPE_CONTROL_OPERATION_MASK) */
+#define OKL4_PIPE_CONTROL_OPERATION_MASK (okl4_pipe_control_t)(7U << 1) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_OPERATION_PIPE_CONTROL) */
+#define OKL4_MASK_OPERATION_PIPE_CONTROL (okl4_pipe_control_t)(7U << 1)
+/*lint -esym(621, OKL4_SHIFT_OPERATION_PIPE_CONTROL) */
+#define OKL4_SHIFT_OPERATION_PIPE_CONTROL (1)
+/*lint -esym(621, OKL4_WIDTH_OPERATION_PIPE_CONTROL) */
+#define OKL4_WIDTH_OPERATION_PIPE_CONTROL (3)
+
+
+/*lint -sem(okl4_pipe_control_getdoop, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_pipe_control_getdoop) */
+/*lint -esym(714, okl4_pipe_control_getdoop) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_control_getdoop(const okl4_pipe_control_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_control_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_pipe_control_setdoop, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_pipe_control_setdoop) */
+
+/*lint -esym(621, okl4_pipe_control_setdoop) */
+OKL4_FORCE_INLINE void
+okl4_pipe_control_setdoop(okl4_pipe_control_t *x, okl4_bool_t _do_op)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_control_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_do_op;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_pipe_control_getoperation, 1p, @n >= 0 && @n <= 7) */
+/*lint -esym(621, okl4_pipe_control_getoperation) */
+/*lint -esym(714, okl4_pipe_control_getoperation) */
+OKL4_FORCE_INLINE uint8_t
+okl4_pipe_control_getoperation(const okl4_pipe_control_t *x)
+{
+    uint8_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 1;
+            uint32_t field : 3;
+        } bits;
+        okl4_pipe_control_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint8_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_pipe_control_setoperation, 2n >= 0 && 2n <= 7) */
+/*lint -esym(714, okl4_pipe_control_setoperation) */
+
+/*lint -esym(621, okl4_pipe_control_setoperation) */
+OKL4_FORCE_INLINE void
+okl4_pipe_control_setoperation(okl4_pipe_control_t *x, uint8_t _operation)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 1;
+            uint32_t field : 3;
+        } bits;
+        okl4_pipe_control_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_operation;
+    *x = _conv.raw;
+}
+/*lint -esym(714, okl4_pipe_control_init) */
+OKL4_FORCE_INLINE void
+okl4_pipe_control_init(okl4_pipe_control_t *x)
+{
+    *x = (okl4_pipe_control_t)0U;
+}
+
+/*lint -esym(714, okl4_pipe_control_cast) */
+OKL4_FORCE_INLINE okl4_pipe_control_t
+okl4_pipe_control_cast(uint8_t p, okl4_bool_t force)
+{
+    okl4_pipe_control_t x = (okl4_pipe_control_t)p;
+    (void)force;
+    return x;
+}
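+
+/*
+ * Illustrative sketch: an okl4_pipe_control_t carries a do_op flag in bit 0
+ * and a 3-bit operation code in bits 3..1. Assuming the
+ * OKL4_PIPE_CONTROL_OP_* values are the operation-field encodings, a control
+ * word requesting SET_TX_READY could be built as:
+ *
+ *     okl4_pipe_control_t ctrl;
+ *     okl4_pipe_control_init(&ctrl);
+ *     okl4_pipe_control_setoperation(&ctrl, OKL4_PIPE_CONTROL_OP_SET_TX_READY);
+ *     okl4_pipe_control_setdoop(&ctrl, (okl4_bool_t)1);
+ */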
+
+
+
+/**
+    - BIT 0 -   @ref OKL4_MASK_RESET_PIPE_STATE
+    - BIT 1 -   @ref OKL4_MASK_HALTED_PIPE_STATE
+    - BIT 2 -   @ref OKL4_MASK_RX_READY_PIPE_STATE
+    - BIT 3 -   @ref OKL4_MASK_TX_READY_PIPE_STATE
+    - BIT 4 -   @ref OKL4_MASK_RX_AVAILABLE_PIPE_STATE
+    - BIT 5 -   @ref OKL4_MASK_TX_AVAILABLE_PIPE_STATE
+    - BIT 6 -   @ref OKL4_MASK_WAITING_PIPE_STATE
+    - BIT 7 -   @ref OKL4_MASK_OVERQUOTA_PIPE_STATE
+*/
+
+/*lint -esym(621, okl4_pipe_state_t) */
+typedef uint8_t okl4_pipe_state_t;
+
+/*lint -esym(621, okl4_pipe_state_getreset) */
+/*lint -esym(714, okl4_pipe_state_getreset) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_getreset(const okl4_pipe_state_t *x);
+
+/*lint -esym(621, okl4_pipe_state_setreset) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_setreset(okl4_pipe_state_t *x, okl4_bool_t _reset);
+
+/*lint -esym(621, okl4_pipe_state_gethalted) */
+/*lint -esym(714, okl4_pipe_state_gethalted) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_gethalted(const okl4_pipe_state_t *x);
+
+/*lint -esym(621, okl4_pipe_state_sethalted) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_sethalted(okl4_pipe_state_t *x, okl4_bool_t _halted);
+
+/*lint -esym(621, okl4_pipe_state_getrxready) */
+/*lint -esym(714, okl4_pipe_state_getrxready) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_getrxready(const okl4_pipe_state_t *x);
+
+/*lint -esym(621, okl4_pipe_state_setrxready) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_setrxready(okl4_pipe_state_t *x, okl4_bool_t _rx_ready);
+
+/*lint -esym(621, okl4_pipe_state_gettxready) */
+/*lint -esym(714, okl4_pipe_state_gettxready) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_gettxready(const okl4_pipe_state_t *x);
+
+/*lint -esym(621, okl4_pipe_state_settxready) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_settxready(okl4_pipe_state_t *x, okl4_bool_t _tx_ready);
+
+/*lint -esym(621, okl4_pipe_state_getrxavailable) */
+/*lint -esym(714, okl4_pipe_state_getrxavailable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_getrxavailable(const okl4_pipe_state_t *x);
+
+/*lint -esym(621, okl4_pipe_state_setrxavailable) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_setrxavailable(okl4_pipe_state_t *x, okl4_bool_t _rx_available);
+
+/*lint -esym(621, okl4_pipe_state_gettxavailable) */
+/*lint -esym(714, okl4_pipe_state_gettxavailable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_gettxavailable(const okl4_pipe_state_t *x);
+
+/*lint -esym(621, okl4_pipe_state_settxavailable) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_settxavailable(okl4_pipe_state_t *x, okl4_bool_t _tx_available);
+
+/*lint -esym(621, okl4_pipe_state_getwaiting) */
+/*lint -esym(714, okl4_pipe_state_getwaiting) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_getwaiting(const okl4_pipe_state_t *x);
+
+/*lint -esym(621, okl4_pipe_state_setwaiting) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_setwaiting(okl4_pipe_state_t *x, okl4_bool_t _waiting);
+
+/*lint -esym(621, okl4_pipe_state_getoverquota) */
+/*lint -esym(714, okl4_pipe_state_getoverquota) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_getoverquota(const okl4_pipe_state_t *x);
+
+/*lint -esym(621, okl4_pipe_state_setoverquota) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_setoverquota(okl4_pipe_state_t *x, okl4_bool_t _overquota);
+
+/*lint -esym(714, okl4_pipe_state_init) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_init(okl4_pipe_state_t *x);
+
+/*lint -esym(714, okl4_pipe_state_cast) */
+OKL4_FORCE_INLINE okl4_pipe_state_t
+okl4_pipe_state_cast(uint8_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_PIPE_STATE_RESET_MASK) */
+#define OKL4_PIPE_STATE_RESET_MASK (okl4_pipe_state_t)(1U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_RESET_PIPE_STATE) */
+#define OKL4_MASK_RESET_PIPE_STATE (okl4_pipe_state_t)(1U)
+/*lint -esym(621, OKL4_SHIFT_RESET_PIPE_STATE) */
+#define OKL4_SHIFT_RESET_PIPE_STATE (0)
+/*lint -esym(621, OKL4_WIDTH_RESET_PIPE_STATE) */
+#define OKL4_WIDTH_RESET_PIPE_STATE (1)
+/*lint -esym(621, OKL4_PIPE_STATE_HALTED_MASK) */
+#define OKL4_PIPE_STATE_HALTED_MASK (okl4_pipe_state_t)(1U << 1) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_HALTED_PIPE_STATE) */
+#define OKL4_MASK_HALTED_PIPE_STATE (okl4_pipe_state_t)(1U << 1)
+/*lint -esym(621, OKL4_SHIFT_HALTED_PIPE_STATE) */
+#define OKL4_SHIFT_HALTED_PIPE_STATE (1)
+/*lint -esym(621, OKL4_WIDTH_HALTED_PIPE_STATE) */
+#define OKL4_WIDTH_HALTED_PIPE_STATE (1)
+/*lint -esym(621, OKL4_PIPE_STATE_RX_READY_MASK) */
+#define OKL4_PIPE_STATE_RX_READY_MASK (okl4_pipe_state_t)(1U << 2) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_RX_READY_PIPE_STATE) */
+#define OKL4_MASK_RX_READY_PIPE_STATE (okl4_pipe_state_t)(1U << 2)
+/*lint -esym(621, OKL4_SHIFT_RX_READY_PIPE_STATE) */
+#define OKL4_SHIFT_RX_READY_PIPE_STATE (2)
+/*lint -esym(621, OKL4_WIDTH_RX_READY_PIPE_STATE) */
+#define OKL4_WIDTH_RX_READY_PIPE_STATE (1)
+/*lint -esym(621, OKL4_PIPE_STATE_TX_READY_MASK) */
+#define OKL4_PIPE_STATE_TX_READY_MASK (okl4_pipe_state_t)(1U << 3) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_TX_READY_PIPE_STATE) */
+#define OKL4_MASK_TX_READY_PIPE_STATE (okl4_pipe_state_t)(1U << 3)
+/*lint -esym(621, OKL4_SHIFT_TX_READY_PIPE_STATE) */
+#define OKL4_SHIFT_TX_READY_PIPE_STATE (3)
+/*lint -esym(621, OKL4_WIDTH_TX_READY_PIPE_STATE) */
+#define OKL4_WIDTH_TX_READY_PIPE_STATE (1)
+/*lint -esym(621, OKL4_PIPE_STATE_RX_AVAILABLE_MASK) */
+#define OKL4_PIPE_STATE_RX_AVAILABLE_MASK (okl4_pipe_state_t)(1U << 4) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_RX_AVAILABLE_PIPE_STATE) */
+#define OKL4_MASK_RX_AVAILABLE_PIPE_STATE (okl4_pipe_state_t)(1U << 4)
+/*lint -esym(621, OKL4_SHIFT_RX_AVAILABLE_PIPE_STATE) */
+#define OKL4_SHIFT_RX_AVAILABLE_PIPE_STATE (4)
+/*lint -esym(621, OKL4_WIDTH_RX_AVAILABLE_PIPE_STATE) */
+#define OKL4_WIDTH_RX_AVAILABLE_PIPE_STATE (1)
+/*lint -esym(621, OKL4_PIPE_STATE_TX_AVAILABLE_MASK) */
+#define OKL4_PIPE_STATE_TX_AVAILABLE_MASK (okl4_pipe_state_t)(1U << 5) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_TX_AVAILABLE_PIPE_STATE) */
+#define OKL4_MASK_TX_AVAILABLE_PIPE_STATE (okl4_pipe_state_t)(1U << 5)
+/*lint -esym(621, OKL4_SHIFT_TX_AVAILABLE_PIPE_STATE) */
+#define OKL4_SHIFT_TX_AVAILABLE_PIPE_STATE (5)
+/*lint -esym(621, OKL4_WIDTH_TX_AVAILABLE_PIPE_STATE) */
+#define OKL4_WIDTH_TX_AVAILABLE_PIPE_STATE (1)
+/*lint -esym(621, OKL4_PIPE_STATE_WAITING_MASK) */
+#define OKL4_PIPE_STATE_WAITING_MASK (okl4_pipe_state_t)(1U << 6) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_WAITING_PIPE_STATE) */
+#define OKL4_MASK_WAITING_PIPE_STATE (okl4_pipe_state_t)(1U << 6)
+/*lint -esym(621, OKL4_SHIFT_WAITING_PIPE_STATE) */
+#define OKL4_SHIFT_WAITING_PIPE_STATE (6)
+/*lint -esym(621, OKL4_WIDTH_WAITING_PIPE_STATE) */
+#define OKL4_WIDTH_WAITING_PIPE_STATE (1)
+/*lint -esym(621, OKL4_PIPE_STATE_OVERQUOTA_MASK) */
+#define OKL4_PIPE_STATE_OVERQUOTA_MASK (okl4_pipe_state_t)(1U << 7) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_OVERQUOTA_PIPE_STATE) */
+#define OKL4_MASK_OVERQUOTA_PIPE_STATE (okl4_pipe_state_t)(1U << 7)
+/*lint -esym(621, OKL4_SHIFT_OVERQUOTA_PIPE_STATE) */
+#define OKL4_SHIFT_OVERQUOTA_PIPE_STATE (7)
+/*lint -esym(621, OKL4_WIDTH_OVERQUOTA_PIPE_STATE) */
+#define OKL4_WIDTH_OVERQUOTA_PIPE_STATE (1)
+
+
+/*lint -sem(okl4_pipe_state_getreset, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_pipe_state_getreset) */
+/*lint -esym(714, okl4_pipe_state_getreset) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_getreset(const okl4_pipe_state_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_pipe_state_setreset, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_pipe_state_setreset) */
+
+/*lint -esym(621, okl4_pipe_state_setreset) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_setreset(okl4_pipe_state_t *x, okl4_bool_t _reset)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_reset;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_pipe_state_gethalted, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_pipe_state_gethalted) */
+/*lint -esym(714, okl4_pipe_state_gethalted) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_gethalted(const okl4_pipe_state_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 1;
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_pipe_state_sethalted, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_pipe_state_sethalted) */
+
+/*lint -esym(621, okl4_pipe_state_sethalted) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_sethalted(okl4_pipe_state_t *x, okl4_bool_t _halted)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 1;
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_halted;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_pipe_state_getrxready, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_pipe_state_getrxready) */
+/*lint -esym(714, okl4_pipe_state_getrxready) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_getrxready(const okl4_pipe_state_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 2;
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_pipe_state_setrxready, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_pipe_state_setrxready) */
+
+/*lint -esym(621, okl4_pipe_state_setrxready) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_setrxready(okl4_pipe_state_t *x, okl4_bool_t _rx_ready)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 2;
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_rx_ready;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_pipe_state_gettxready, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_pipe_state_gettxready) */
+/*lint -esym(714, okl4_pipe_state_gettxready) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_gettxready(const okl4_pipe_state_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 3;
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_pipe_state_settxready, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_pipe_state_settxready) */
+
+/*lint -esym(621, okl4_pipe_state_settxready) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_settxready(okl4_pipe_state_t *x, okl4_bool_t _tx_ready)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 3;
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_tx_ready;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_pipe_state_getrxavailable, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_pipe_state_getrxavailable) */
+/*lint -esym(714, okl4_pipe_state_getrxavailable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_getrxavailable(const okl4_pipe_state_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 4;
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_pipe_state_setrxavailable, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_pipe_state_setrxavailable) */
+
+/*lint -esym(621, okl4_pipe_state_setrxavailable) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_setrxavailable(okl4_pipe_state_t *x, okl4_bool_t _rx_available)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 4;
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_rx_available;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_pipe_state_gettxavailable, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_pipe_state_gettxavailable) */
+/*lint -esym(714, okl4_pipe_state_gettxavailable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_gettxavailable(const okl4_pipe_state_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 5;
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_pipe_state_settxavailable, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_pipe_state_settxavailable) */
+
+/*lint -esym(621, okl4_pipe_state_settxavailable) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_settxavailable(okl4_pipe_state_t *x, okl4_bool_t _tx_available)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 5;
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_tx_available;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_pipe_state_getwaiting, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_pipe_state_getwaiting) */
+/*lint -esym(714, okl4_pipe_state_getwaiting) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_getwaiting(const okl4_pipe_state_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 6;
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_pipe_state_setwaiting, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_pipe_state_setwaiting) */
+
+/*lint -esym(621, okl4_pipe_state_setwaiting) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_setwaiting(okl4_pipe_state_t *x, okl4_bool_t _waiting)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 6;
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_waiting;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_pipe_state_getoverquota, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_pipe_state_getoverquota) */
+/*lint -esym(714, okl4_pipe_state_getoverquota) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_getoverquota(const okl4_pipe_state_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 7;
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_pipe_state_setoverquota, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_pipe_state_setoverquota) */
+
+/*lint -esym(621, okl4_pipe_state_setoverquota) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_setoverquota(okl4_pipe_state_t *x, okl4_bool_t _overquota)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 7;
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_overquota;
+    *x = _conv.raw;
+}
+/*lint -esym(714, okl4_pipe_state_init) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_init(okl4_pipe_state_t *x)
+{
+    *x = (okl4_pipe_state_t)1U;
+}
+
+/*lint -esym(714, okl4_pipe_state_cast) */
+OKL4_FORCE_INLINE okl4_pipe_state_t
+okl4_pipe_state_cast(uint8_t p, okl4_bool_t force)
+{
+    okl4_pipe_state_t x = (okl4_pipe_state_t)p;
+    (void)force;
+    return x;
+}
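+
+/*
+ * Illustrative usage sketch (not part of the generated interface): combining
+ * the pipe-state accessors defined above to decide whether a send can be
+ * attempted.  The state word is assumed to have been obtained elsewhere, for
+ * example from a pipe status query; the helper name is hypothetical.
+ */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_example_pipe_can_send(const okl4_pipe_state_t *state)
+{
+    /* TX must be both ready and have buffer space available. */
+    return (okl4_bool_t)(okl4_pipe_state_gettxready(state) &&
+            okl4_pipe_state_gettxavailable(state));
+}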
+
+
+
+
+typedef uint32_t okl4_power_state_t;
+
+/*lint -esym(621, OKL4_POWER_STATE_IDLE) */
+#define OKL4_POWER_STATE_IDLE ((okl4_power_state_t)(0U))
+
+/*lint -esym(621, OKL4_POWER_STATE_PLATFORM_BASE) */
+#define OKL4_POWER_STATE_PLATFORM_BASE ((okl4_power_state_t)(256U))
+
+/*lint -esym(621, OKL4_POWER_STATE_POWEROFF) */
+#define OKL4_POWER_STATE_POWEROFF ((okl4_power_state_t)(1U))
+
+
+
+/**
+    The okl4_priority_t type represents a thread scheduling priority.
+    Valid priorities range from [0, CONFIG\_SCHEDULER\_NUM\_PRIOS).
+*/
+
+typedef int8_t okl4_priority_t;
+
+
+
+
+
+typedef okl4_register_t okl4_psize_pn_t;
+
+
+
+
+
+typedef okl4_register_t okl4_psize_tr_t;
+
+
+
+
+/**
+    The okl4_register_set_t type is an enumeration identifying one of
+    the register sets supported by the host machine. This includes the
+    general-purpose registers, along with other CPU-specific register
+    sets such as floating point or vector registers.
+
+    - @ref OKL4_REGISTER_SET_CPU_REGS
+    - @ref OKL4_REGISTER_SET_VFP_REGS
+    - @ref OKL4_REGISTER_SET_VFP_CTRL_REGS
+    - @ref OKL4_REGISTER_SET_VFP64_REGS
+    - @ref OKL4_REGISTER_SET_VFP128_REGS
+    - @ref OKL4_REGISTER_SET_MAX
+    - @ref OKL4_REGISTER_SET_INVALID
+*/
+
+typedef uint32_t okl4_register_set_t;
+
+/*lint -esym(621, OKL4_REGISTER_SET_CPU_REGS) */
+#define OKL4_REGISTER_SET_CPU_REGS ((okl4_register_set_t)0x0U)
+/*lint -esym(621, OKL4_REGISTER_SET_VFP_REGS) */
+#define OKL4_REGISTER_SET_VFP_REGS ((okl4_register_set_t)0x1U)
+/*lint -esym(621, OKL4_REGISTER_SET_VFP_CTRL_REGS) */
+#define OKL4_REGISTER_SET_VFP_CTRL_REGS ((okl4_register_set_t)0x2U)
+/*lint -esym(621, OKL4_REGISTER_SET_VFP64_REGS) */
+#define OKL4_REGISTER_SET_VFP64_REGS ((okl4_register_set_t)0x3U)
+/*lint -esym(621, OKL4_REGISTER_SET_VFP128_REGS) */
+#define OKL4_REGISTER_SET_VFP128_REGS ((okl4_register_set_t)0x4U)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_REGISTER_SET_MAX) */
+#define OKL4_REGISTER_SET_MAX ((okl4_register_set_t)0x4U)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_REGISTER_SET_INVALID) */
+#define OKL4_REGISTER_SET_INVALID ((okl4_register_set_t)0xffffffffU)
+
+/*lint -esym(714, okl4_register_set_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_register_set_is_element_of(okl4_register_set_t var);
+
+
+/*lint -esym(714, okl4_register_set_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_register_set_is_element_of(okl4_register_set_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((var == OKL4_REGISTER_SET_CPU_REGS) ||
+            (var == OKL4_REGISTER_SET_VFP_REGS) ||
+            (var == OKL4_REGISTER_SET_VFP_CTRL_REGS) ||
+            (var == OKL4_REGISTER_SET_VFP64_REGS) ||
+            (var == OKL4_REGISTER_SET_VFP128_REGS));
+}
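+
+/*
+ * Illustrative usage sketch (not part of the generated interface): validating
+ * an untrusted register-set identifier with the membership check above before
+ * it is used.  The helper name is hypothetical.
+ */
+OKL4_FORCE_INLINE okl4_register_set_t
+okl4_example_checked_register_set(uint32_t raw)
+{
+    okl4_register_set_t set = (okl4_register_set_t)raw;
+
+    /* Collapse any out-of-range value to the invalid sentinel. */
+    return okl4_register_set_is_element_of(set) ? set :
+            OKL4_REGISTER_SET_INVALID;
+}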
+
+
+
+typedef okl4_psize_t okl4_vsize_t;
+
+
+
+
+/**
+    The okl4_register_and_set_t type is a bitfield containing a register
+    set identifier of type okl4_register_set_t, and an index into that
+    register set.
+
+    - BITS 15..0 -   @ref OKL4_MASK_OFFSET_REGISTER_AND_SET
+    - BITS 31..16 -   @ref OKL4_MASK_SET_REGISTER_AND_SET
+*/
+
+/*lint -esym(621, okl4_register_and_set_t) */
+typedef uint32_t okl4_register_and_set_t;
+
+/*lint -esym(621, okl4_register_and_set_getoffset) */
+/*lint -esym(714, okl4_register_and_set_getoffset) */
+OKL4_FORCE_INLINE okl4_vsize_t
+okl4_register_and_set_getoffset(const okl4_register_and_set_t *x);
+
+/*lint -esym(621, okl4_register_and_set_setoffset) */
+OKL4_FORCE_INLINE void
+okl4_register_and_set_setoffset(okl4_register_and_set_t *x, okl4_vsize_t _offset);
+
+/*lint -esym(621, okl4_register_and_set_getset) */
+/*lint -esym(714, okl4_register_and_set_getset) */
+OKL4_FORCE_INLINE okl4_register_set_t
+okl4_register_and_set_getset(const okl4_register_and_set_t *x);
+
+/*lint -esym(621, okl4_register_and_set_setset) */
+OKL4_FORCE_INLINE void
+okl4_register_and_set_setset(okl4_register_and_set_t *x, okl4_register_set_t _set);
+
+/*lint -esym(714, okl4_register_and_set_init) */
+OKL4_FORCE_INLINE void
+okl4_register_and_set_init(okl4_register_and_set_t *x);
+
+/*lint -esym(714, okl4_register_and_set_cast) */
+OKL4_FORCE_INLINE okl4_register_and_set_t
+okl4_register_and_set_cast(uint32_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_REGISTER_AND_SET_OFFSET_MASK) */
+#define OKL4_REGISTER_AND_SET_OFFSET_MASK ((okl4_register_and_set_t)65535U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_OFFSET_REGISTER_AND_SET) */
+#define OKL4_MASK_OFFSET_REGISTER_AND_SET ((okl4_register_and_set_t)65535U)
+/*lint -esym(621, OKL4_SHIFT_OFFSET_REGISTER_AND_SET) */
+#define OKL4_SHIFT_OFFSET_REGISTER_AND_SET (0)
+/*lint -esym(621, OKL4_WIDTH_OFFSET_REGISTER_AND_SET) */
+#define OKL4_WIDTH_OFFSET_REGISTER_AND_SET (16)
+/*lint -esym(621, OKL4_REGISTER_AND_SET_SET_MASK) */
+#define OKL4_REGISTER_AND_SET_SET_MASK ((okl4_register_and_set_t)65535U << 16) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_SET_REGISTER_AND_SET) */
+#define OKL4_MASK_SET_REGISTER_AND_SET ((okl4_register_and_set_t)65535U << 16)
+/*lint -esym(621, OKL4_SHIFT_SET_REGISTER_AND_SET) */
+#define OKL4_SHIFT_SET_REGISTER_AND_SET (16)
+/*lint -esym(621, OKL4_WIDTH_SET_REGISTER_AND_SET) */
+#define OKL4_WIDTH_SET_REGISTER_AND_SET (16)
+
+
+/*lint -sem(okl4_register_and_set_getoffset, 1p, @n >= 0 && @n <= 65535) */
+/*lint -esym(621, okl4_register_and_set_getoffset) */
+/*lint -esym(714, okl4_register_and_set_getoffset) */
+OKL4_FORCE_INLINE okl4_vsize_t
+okl4_register_and_set_getoffset(const okl4_register_and_set_t *x)
+{
+    okl4_vsize_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 16;
+        } bits;
+        okl4_register_and_set_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_vsize_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_register_and_set_setoffset, 2n >= 0 && 2n <= 65535) */
+/*lint -esym(714, okl4_register_and_set_setoffset) */
+
+/*lint -esym(621, okl4_register_and_set_setoffset) */
+OKL4_FORCE_INLINE void
+okl4_register_and_set_setoffset(okl4_register_and_set_t *x, okl4_vsize_t _offset)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 16;
+        } bits;
+        okl4_register_and_set_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_offset;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_register_and_set_getset, 1p, @n >= 0 && @n <= 65535) */
+/*lint -esym(621, okl4_register_and_set_getset) */
+/*lint -esym(714, okl4_register_and_set_getset) */
+OKL4_FORCE_INLINE okl4_register_set_t
+okl4_register_and_set_getset(const okl4_register_and_set_t *x)
+{
+    okl4_register_set_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 16;
+            uint32_t field : 16;
+        } bits;
+        okl4_register_and_set_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_register_set_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_register_and_set_setset, 2n >= 0 && 2n <= 65535) */
+/*lint -esym(714, okl4_register_and_set_setset) */
+
+/*lint -esym(621, okl4_register_and_set_setset) */
+OKL4_FORCE_INLINE void
+okl4_register_and_set_setset(okl4_register_and_set_t *x, okl4_register_set_t _set)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 16;
+            uint32_t field : 16;
+        } bits;
+        okl4_register_and_set_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_set;
+    *x = _conv.raw;
+}
+/*lint -esym(714, okl4_register_and_set_init) */
+OKL4_FORCE_INLINE void
+okl4_register_and_set_init(okl4_register_and_set_t *x)
+{
+    *x = (okl4_register_and_set_t)0U;
+}
+
+/*lint -esym(714, okl4_register_and_set_cast) */
+OKL4_FORCE_INLINE okl4_register_and_set_t
+okl4_register_and_set_cast(uint32_t p, okl4_bool_t force)
+{
+    okl4_register_and_set_t x = (okl4_register_and_set_t)p;
+    (void)force;
+    return x;
+}
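+
+/*
+ * Illustrative usage sketch (not part of the generated interface): composing
+ * an okl4_register_and_set_t value that names a general-purpose CPU register
+ * by its offset within OKL4_REGISTER_SET_CPU_REGS.  The helper name is
+ * hypothetical.
+ */
+OKL4_FORCE_INLINE okl4_register_and_set_t
+okl4_example_cpu_register(okl4_vsize_t offset)
+{
+    okl4_register_and_set_t reg;
+
+    okl4_register_and_set_init(&reg);
+    okl4_register_and_set_setset(&reg, OKL4_REGISTER_SET_CPU_REGS);
+    okl4_register_and_set_setoffset(&reg, offset);
+    return reg;
+}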
+
+
+
+
+struct okl4_cpu_registers {
+    okl4_register_t x[31];
+    okl4_register_t sp_el0;
+    okl4_register_t ip;
+    uint32_t cpsr;
+    _okl4_padding_t __padding0_4; /**< Padding 8 */
+    _okl4_padding_t __padding1_5; /**< Padding 8 */
+    _okl4_padding_t __padding2_6; /**< Padding 8 */
+    _okl4_padding_t __padding3_7; /**< Padding 8 */
+    okl4_register_t sp_EL1;
+    okl4_register_t elr_EL1;
+    uint32_t spsr_EL1;
+    uint32_t spsr_abt;
+    uint32_t spsr_und;
+    uint32_t spsr_irq;
+    uint32_t spsr_fiq;
+    uint32_t csselr_EL1;
+    okl4_arm_sctlr_t sctlr_EL1;
+    uint32_t cpacr_EL1;
+    uint64_t ttbr0_EL1;
+    uint64_t ttbr1_EL1;
+    uint64_t tcr_EL1;
+    uint32_t dacr32_EL2;
+    uint32_t ifsr32_EL2;
+    uint32_t esr_EL1;
+    _okl4_padding_t __padding4_4; /**< Padding 8 */
+    _okl4_padding_t __padding5_5; /**< Padding 8 */
+    _okl4_padding_t __padding6_6; /**< Padding 8 */
+    _okl4_padding_t __padding7_7; /**< Padding 8 */
+    uint64_t far_EL1;
+    uint64_t par_EL1;
+    uint64_t mair_EL1;
+    uint64_t vbar_EL1;
+    uint32_t contextidr_EL1;
+    _okl4_padding_t __padding8_4; /**< Padding 8 */
+    _okl4_padding_t __padding9_5; /**< Padding 8 */
+    _okl4_padding_t __padding10_6; /**< Padding 8 */
+    _okl4_padding_t __padding11_7; /**< Padding 8 */
+    uint64_t tpidr_EL1;
+    uint64_t tpidrro_EL0;
+    uint64_t tpidr_EL0;
+    uint32_t pmcr_EL0;
+    _okl4_padding_t __padding12_4; /**< Padding 8 */
+    _okl4_padding_t __padding13_5; /**< Padding 8 */
+    _okl4_padding_t __padding14_6; /**< Padding 8 */
+    _okl4_padding_t __padding15_7; /**< Padding 8 */
+    uint64_t pmccntr_EL0;
+    uint32_t fpexc32_EL2;
+    uint32_t cntkctl_EL1;
+};
+
+
+
+
+
+
+/**
+    The okl4_cpu_registers_t type represents a set of CPU general-purpose
+    registers on the native machine.
+*/
+
+typedef struct okl4_cpu_registers okl4_cpu_registers_t;
+
+
+
+
+/**
+    The `okl4_rights_t` type represents a set of operations that are allowed to
+    be performed using a given cap.
+*/
+
+typedef uint32_t okl4_rights_t;
+
+
+
+
+
+typedef uint64_t okl4_soc_time_t;
+
+
+
+
+/**
+
+*/
+
+struct okl4_schedule_profile_data {
+    okl4_soc_time_t timestamp;
+    okl4_soc_time_t cpu_time;
+    okl4_count_t context_switches;
+    okl4_count_t cpu_migrations;
+    okl4_count_t cpu_hwirqs;
+    okl4_count_t cpu_virqs;
+};
+
+
+
+
+/**
+    - BIT 0 -   @ref OKL4_MASK_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS
+*/
+
+/*lint -esym(621, okl4_scheduler_virq_flags_t) */
+typedef okl4_virq_flags_t okl4_scheduler_virq_flags_t;
+
+/*lint -esym(621, okl4_scheduler_virq_flags_getpowersuspended) */
+/*lint -esym(714, okl4_scheduler_virq_flags_getpowersuspended) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_scheduler_virq_flags_getpowersuspended(const okl4_scheduler_virq_flags_t *x);
+
+/*lint -esym(621, okl4_scheduler_virq_flags_setpowersuspended) */
+OKL4_FORCE_INLINE void
+okl4_scheduler_virq_flags_setpowersuspended(okl4_scheduler_virq_flags_t *x, okl4_bool_t _power_suspended);
+
+/*lint -esym(714, okl4_scheduler_virq_flags_init) */
+OKL4_FORCE_INLINE void
+okl4_scheduler_virq_flags_init(okl4_scheduler_virq_flags_t *x);
+
+/*lint -esym(714, okl4_scheduler_virq_flags_cast) */
+OKL4_FORCE_INLINE okl4_scheduler_virq_flags_t
+okl4_scheduler_virq_flags_cast(uint64_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_SCHEDULER_VIRQ_FLAGS_POWER_SUSPENDED_MASK) */
+#define OKL4_SCHEDULER_VIRQ_FLAGS_POWER_SUSPENDED_MASK ((okl4_scheduler_virq_flags_t)1U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS) */
+#define OKL4_MASK_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS ((okl4_scheduler_virq_flags_t)1U)
+/*lint -esym(621, OKL4_SHIFT_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS) */
+#define OKL4_SHIFT_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS (0)
+/*lint -esym(621, OKL4_WIDTH_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS) */
+#define OKL4_WIDTH_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS (1)
+
+
+/*lint -sem(okl4_scheduler_virq_flags_getpowersuspended, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_scheduler_virq_flags_getpowersuspended) */
+/*lint -esym(714, okl4_scheduler_virq_flags_getpowersuspended) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_scheduler_virq_flags_getpowersuspended(const okl4_scheduler_virq_flags_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            _Bool field : 1;
+        } bits;
+        okl4_scheduler_virq_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_scheduler_virq_flags_setpowersuspended, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_scheduler_virq_flags_setpowersuspended) */
+
+/*lint -esym(621, okl4_scheduler_virq_flags_setpowersuspended) */
+OKL4_FORCE_INLINE void
+okl4_scheduler_virq_flags_setpowersuspended(okl4_scheduler_virq_flags_t *x, okl4_bool_t _power_suspended)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            _Bool field : 1;
+        } bits;
+        okl4_scheduler_virq_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_power_suspended;
+    *x = _conv.raw;
+}
+/*lint -esym(714, okl4_scheduler_virq_flags_init) */
+OKL4_FORCE_INLINE void
+okl4_scheduler_virq_flags_init(okl4_scheduler_virq_flags_t *x)
+{
+    *x = (okl4_scheduler_virq_flags_t)0U;
+}
+
+/*lint -esym(714, okl4_scheduler_virq_flags_cast) */
+OKL4_FORCE_INLINE okl4_scheduler_virq_flags_t
+okl4_scheduler_virq_flags_cast(uint64_t p, okl4_bool_t force)
+{
+    okl4_scheduler_virq_flags_t x = (okl4_scheduler_virq_flags_t)p;
+    (void)force;
+    return x;
+}
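+
+/*
+ * Illustrative usage sketch (not part of the generated interface): assuming
+ * the raw word originates from a scheduler virtual-interrupt payload, the
+ * cast and getter above extract the power-suspended flag.  The helper name is
+ * hypothetical.
+ */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_example_virq_power_suspended(uint64_t payload)
+{
+    okl4_scheduler_virq_flags_t flags =
+            okl4_scheduler_virq_flags_cast(payload, (okl4_bool_t)0U);
+
+    return okl4_scheduler_virq_flags_getpowersuspended(&flags);
+}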
+
+
+
+/**
+    The `okl4_scount_t` type represents a natural number of items or
+    iterations. Negative values represent errors. Use `okl4_count_t` if error
+    values are not required.
+*/
+
+typedef int32_t okl4_scount_t;
+
+
+
+
+/**
+    The SDK_VERSION value encodes the global, SDK-wide software version.
+
+    - BITS 5..0 -   @ref OKL4_MASK_MAINTENANCE_SDK_VERSION
+    - BITS 15..8 -   @ref OKL4_MASK_RELEASE_SDK_VERSION
+    - BITS 21..16 -   @ref OKL4_MASK_MINOR_SDK_VERSION
+    - BITS 27..24 -   @ref OKL4_MASK_MAJOR_SDK_VERSION
+    - BIT 28 -   @ref OKL4_MASK_RES0_FLAG_SDK_VERSION
+    - BIT 30 -   @ref OKL4_MASK_DEV_FLAG_SDK_VERSION
+    - BIT 31 -   @ref OKL4_MASK_FORMAT_FLAG_SDK_VERSION
+*/
+
+/*lint -esym(621, okl4_sdk_version_t) */
+typedef uint32_t okl4_sdk_version_t;
+
+/*lint -esym(621, okl4_sdk_version_getformatflag) */
+/*lint -esym(714, okl4_sdk_version_getformatflag) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getformatflag(const okl4_sdk_version_t *x);
+
+/*lint -esym(621, okl4_sdk_version_setformatflag) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setformatflag(okl4_sdk_version_t *x, uint32_t _format_flag);
+
+/*lint -esym(621, okl4_sdk_version_getdevflag) */
+/*lint -esym(714, okl4_sdk_version_getdevflag) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getdevflag(const okl4_sdk_version_t *x);
+
+/*lint -esym(621, okl4_sdk_version_setdevflag) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setdevflag(okl4_sdk_version_t *x, uint32_t _dev_flag);
+
+/*lint -esym(621, okl4_sdk_version_getres0flag) */
+/*lint -esym(714, okl4_sdk_version_getres0flag) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getres0flag(const okl4_sdk_version_t *x);
+
+/*lint -esym(621, okl4_sdk_version_setres0flag) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setres0flag(okl4_sdk_version_t *x, uint32_t _res0_flag);
+
+/*lint -esym(621, okl4_sdk_version_getmajor) */
+/*lint -esym(714, okl4_sdk_version_getmajor) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getmajor(const okl4_sdk_version_t *x);
+
+/*lint -esym(621, okl4_sdk_version_setmajor) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setmajor(okl4_sdk_version_t *x, uint32_t _major);
+
+/*lint -esym(621, okl4_sdk_version_getminor) */
+/*lint -esym(714, okl4_sdk_version_getminor) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getminor(const okl4_sdk_version_t *x);
+
+/*lint -esym(621, okl4_sdk_version_setminor) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setminor(okl4_sdk_version_t *x, uint32_t _minor);
+
+/*lint -esym(621, okl4_sdk_version_getrelease) */
+/*lint -esym(714, okl4_sdk_version_getrelease) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getrelease(const okl4_sdk_version_t *x);
+
+/*lint -esym(621, okl4_sdk_version_setrelease) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setrelease(okl4_sdk_version_t *x, uint32_t _release);
+
+/*lint -esym(621, okl4_sdk_version_getmaintenance) */
+/*lint -esym(714, okl4_sdk_version_getmaintenance) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getmaintenance(const okl4_sdk_version_t *x);
+
+/*lint -esym(621, okl4_sdk_version_setmaintenance) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setmaintenance(okl4_sdk_version_t *x, uint32_t _maintenance);
+
+/*lint -esym(714, okl4_sdk_version_init) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_init(okl4_sdk_version_t *x);
+
+/*lint -esym(714, okl4_sdk_version_cast) */
+OKL4_FORCE_INLINE okl4_sdk_version_t
+okl4_sdk_version_cast(uint32_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_SDK_VERSION_MAINTENANCE_MASK) */
+#define OKL4_SDK_VERSION_MAINTENANCE_MASK ((okl4_sdk_version_t)63U) /* Deprecated */
+/** Maintenance number */
+/*lint -esym(621, OKL4_MASK_MAINTENANCE_SDK_VERSION) */
+#define OKL4_MASK_MAINTENANCE_SDK_VERSION ((okl4_sdk_version_t)63U)
+/*lint -esym(621, OKL4_SHIFT_MAINTENANCE_SDK_VERSION) */
+#define OKL4_SHIFT_MAINTENANCE_SDK_VERSION (0)
+/*lint -esym(621, OKL4_WIDTH_MAINTENANCE_SDK_VERSION) */
+#define OKL4_WIDTH_MAINTENANCE_SDK_VERSION (6)
+/*lint -esym(621, OKL4_SDK_VERSION_RELEASE_MASK) */
+#define OKL4_SDK_VERSION_RELEASE_MASK ((okl4_sdk_version_t)255U << 8) /* Deprecated */
+/** SDK Release Number */
+/*lint -esym(621, OKL4_MASK_RELEASE_SDK_VERSION) */
+#define OKL4_MASK_RELEASE_SDK_VERSION ((okl4_sdk_version_t)255U << 8)
+/*lint -esym(621, OKL4_SHIFT_RELEASE_SDK_VERSION) */
+#define OKL4_SHIFT_RELEASE_SDK_VERSION (8)
+/*lint -esym(621, OKL4_WIDTH_RELEASE_SDK_VERSION) */
+#define OKL4_WIDTH_RELEASE_SDK_VERSION (8)
+/*lint -esym(621, OKL4_SDK_VERSION_MINOR_MASK) */
+#define OKL4_SDK_VERSION_MINOR_MASK ((okl4_sdk_version_t)63U << 16) /* Deprecated */
+/** SDK Minor Number */
+/*lint -esym(621, OKL4_MASK_MINOR_SDK_VERSION) */
+#define OKL4_MASK_MINOR_SDK_VERSION ((okl4_sdk_version_t)63U << 16)
+/*lint -esym(621, OKL4_SHIFT_MINOR_SDK_VERSION) */
+#define OKL4_SHIFT_MINOR_SDK_VERSION (16)
+/*lint -esym(621, OKL4_WIDTH_MINOR_SDK_VERSION) */
+#define OKL4_WIDTH_MINOR_SDK_VERSION (6)
+/*lint -esym(621, OKL4_SDK_VERSION_MAJOR_MASK) */
+#define OKL4_SDK_VERSION_MAJOR_MASK ((okl4_sdk_version_t)15U << 24) /* Deprecated */
+/** SDK Major Number */
+/*lint -esym(621, OKL4_MASK_MAJOR_SDK_VERSION) */
+#define OKL4_MASK_MAJOR_SDK_VERSION ((okl4_sdk_version_t)15U << 24)
+/*lint -esym(621, OKL4_SHIFT_MAJOR_SDK_VERSION) */
+#define OKL4_SHIFT_MAJOR_SDK_VERSION (24)
+/*lint -esym(621, OKL4_WIDTH_MAJOR_SDK_VERSION) */
+#define OKL4_WIDTH_MAJOR_SDK_VERSION (4)
+/*lint -esym(621, OKL4_SDK_VERSION_RES0_FLAG_MASK) */
+#define OKL4_SDK_VERSION_RES0_FLAG_MASK ((okl4_sdk_version_t)1U << 28) /* Deprecated */
+/** Reserved */
+/*lint -esym(621, OKL4_MASK_RES0_FLAG_SDK_VERSION) */
+#define OKL4_MASK_RES0_FLAG_SDK_VERSION ((okl4_sdk_version_t)1U << 28)
+/*lint -esym(621, OKL4_SHIFT_RES0_FLAG_SDK_VERSION) */
+#define OKL4_SHIFT_RES0_FLAG_SDK_VERSION (28)
+/*lint -esym(621, OKL4_WIDTH_RES0_FLAG_SDK_VERSION) */
+#define OKL4_WIDTH_RES0_FLAG_SDK_VERSION (1)
+/*lint -esym(621, OKL4_SDK_VERSION_DEV_FLAG_MASK) */
+#define OKL4_SDK_VERSION_DEV_FLAG_MASK ((okl4_sdk_version_t)1U << 30) /* Deprecated */
+/** Unreleased internal development version */
+/*lint -esym(621, OKL4_MASK_DEV_FLAG_SDK_VERSION) */
+#define OKL4_MASK_DEV_FLAG_SDK_VERSION ((okl4_sdk_version_t)1U << 30)
+/*lint -esym(621, OKL4_SHIFT_DEV_FLAG_SDK_VERSION) */
+#define OKL4_SHIFT_DEV_FLAG_SDK_VERSION (30)
+/*lint -esym(621, OKL4_WIDTH_DEV_FLAG_SDK_VERSION) */
+#define OKL4_WIDTH_DEV_FLAG_SDK_VERSION (1)
+/*lint -esym(621, OKL4_SDK_VERSION_FORMAT_FLAG_MASK) */
+#define OKL4_SDK_VERSION_FORMAT_FLAG_MASK ((okl4_sdk_version_t)1U << 31) /* Deprecated */
+/** Format: 0 = Version format 1, 1 = Reserved */
+/*lint -esym(621, OKL4_MASK_FORMAT_FLAG_SDK_VERSION) */
+#define OKL4_MASK_FORMAT_FLAG_SDK_VERSION ((okl4_sdk_version_t)1U << 31)
+/*lint -esym(621, OKL4_SHIFT_FORMAT_FLAG_SDK_VERSION) */
+#define OKL4_SHIFT_FORMAT_FLAG_SDK_VERSION (31)
+/*lint -esym(621, OKL4_WIDTH_FORMAT_FLAG_SDK_VERSION) */
+#define OKL4_WIDTH_FORMAT_FLAG_SDK_VERSION (1)
+
+
+/*lint -sem(okl4_sdk_version_getmaintenance, 1p, @n >= 0 && @n <= 63) */
+/*lint -esym(621, okl4_sdk_version_getmaintenance) */
+/*lint -esym(714, okl4_sdk_version_getmaintenance) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getmaintenance(const okl4_sdk_version_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 6;
+        } bits;
+        okl4_sdk_version_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_sdk_version_setmaintenance, 2n >= 0 && 2n <= 63) */
+/*lint -esym(714, okl4_sdk_version_setmaintenance) */
+
+/*lint -esym(621, okl4_sdk_version_setmaintenance) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setmaintenance(okl4_sdk_version_t *x, uint32_t _maintenance)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 6;
+        } bits;
+        okl4_sdk_version_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_maintenance;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_sdk_version_getrelease, 1p, @n >= 0 && @n <= 255) */
+/*lint -esym(621, okl4_sdk_version_getrelease) */
+/*lint -esym(714, okl4_sdk_version_getrelease) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getrelease(const okl4_sdk_version_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 8;
+            uint32_t field : 8;
+        } bits;
+        okl4_sdk_version_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_sdk_version_setrelease, 2n >= 0 && 2n <= 255) */
+/*lint -esym(714, okl4_sdk_version_setrelease) */
+
+/*lint -esym(621, okl4_sdk_version_setrelease) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setrelease(okl4_sdk_version_t *x, uint32_t _release)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 8;
+            uint32_t field : 8;
+        } bits;
+        okl4_sdk_version_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_release;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_sdk_version_getminor, 1p, @n >= 0 && @n <= 63) */
+/*lint -esym(621, okl4_sdk_version_getminor) */
+/*lint -esym(714, okl4_sdk_version_getminor) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getminor(const okl4_sdk_version_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 16;
+            uint32_t field : 6;
+        } bits;
+        okl4_sdk_version_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_sdk_version_setminor, 2n >= 0 && 2n <= 63) */
+/*lint -esym(714, okl4_sdk_version_setminor) */
+
+/*lint -esym(621, okl4_sdk_version_setminor) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setminor(okl4_sdk_version_t *x, uint32_t _minor)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 16;
+            uint32_t field : 6;
+        } bits;
+        okl4_sdk_version_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_minor;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_sdk_version_getmajor, 1p, @n >= 0 && @n <= 15) */
+/*lint -esym(621, okl4_sdk_version_getmajor) */
+/*lint -esym(714, okl4_sdk_version_getmajor) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getmajor(const okl4_sdk_version_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 24;
+            uint32_t field : 4;
+        } bits;
+        okl4_sdk_version_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_sdk_version_setmajor, 2n >= 0 && 2n <= 15) */
+/*lint -esym(714, okl4_sdk_version_setmajor) */
+
+/*lint -esym(621, okl4_sdk_version_setmajor) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setmajor(okl4_sdk_version_t *x, uint32_t _major)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 24;
+            uint32_t field : 4;
+        } bits;
+        okl4_sdk_version_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_major;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_sdk_version_getres0flag, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_sdk_version_getres0flag) */
+/*lint -esym(714, okl4_sdk_version_getres0flag) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getres0flag(const okl4_sdk_version_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 28;
+            uint32_t field : 1;
+        } bits;
+        okl4_sdk_version_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_sdk_version_setres0flag, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_sdk_version_setres0flag) */
+
+/*lint -esym(621, okl4_sdk_version_setres0flag) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setres0flag(okl4_sdk_version_t *x, uint32_t _res0_flag)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 28;
+            uint32_t field : 1;
+        } bits;
+        okl4_sdk_version_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_res0_flag;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_sdk_version_getdevflag, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_sdk_version_getdevflag) */
+/*lint -esym(714, okl4_sdk_version_getdevflag) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getdevflag(const okl4_sdk_version_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 30;
+            uint32_t field : 1;
+        } bits;
+        okl4_sdk_version_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_sdk_version_setdevflag, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_sdk_version_setdevflag) */
+
+/*lint -esym(621, okl4_sdk_version_setdevflag) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setdevflag(okl4_sdk_version_t *x, uint32_t _dev_flag)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 30;
+            uint32_t field : 1;
+        } bits;
+        okl4_sdk_version_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_dev_flag;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_sdk_version_getformatflag, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_sdk_version_getformatflag) */
+/*lint -esym(714, okl4_sdk_version_getformatflag) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getformatflag(const okl4_sdk_version_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 31;
+            uint32_t field : 1;
+        } bits;
+        okl4_sdk_version_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_sdk_version_setformatflag, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_sdk_version_setformatflag) */
+
+/*lint -esym(621, okl4_sdk_version_setformatflag) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setformatflag(okl4_sdk_version_t *x, uint32_t _format_flag)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 31;
+            uint32_t field : 1;
+        } bits;
+        okl4_sdk_version_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_format_flag;
+    *x = _conv.raw;
+}
+/*lint -esym(714, okl4_sdk_version_init) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_init(okl4_sdk_version_t *x)
+{
+    *x = (okl4_sdk_version_t)0U;
+}
+
+/*lint -esym(714, okl4_sdk_version_cast) */
+OKL4_FORCE_INLINE okl4_sdk_version_t
+okl4_sdk_version_cast(uint32_t p, okl4_bool_t force)
+{
+    okl4_sdk_version_t x = (okl4_sdk_version_t)p;
+    (void)force;
+    return x;
+}
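+
+/*
+ * Illustrative usage sketch (not part of the generated interface): unpacking
+ * an SDK version word into its numeric components with the accessors above.
+ * The okl4_example_sdk_version struct and helper name are hypothetical.
+ */
+struct okl4_example_sdk_version {
+    uint32_t major;
+    uint32_t minor;
+    uint32_t release;
+    uint32_t maintenance;
+};
+
+OKL4_FORCE_INLINE struct okl4_example_sdk_version
+okl4_example_sdk_version_decode(uint32_t raw)
+{
+    okl4_sdk_version_t ver = okl4_sdk_version_cast(raw, (okl4_bool_t)0U);
+    struct okl4_example_sdk_version decoded;
+
+    decoded.major = okl4_sdk_version_getmajor(&ver);
+    decoded.minor = okl4_sdk_version_getminor(&ver);
+    decoded.release = okl4_sdk_version_getrelease(&ver);
+    decoded.maintenance = okl4_sdk_version_getmaintenance(&ver);
+    return decoded;
+}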
+
+
+
+/**
+
+*/
+
+struct okl4_shared_buffer {
+    okl4_paddr_t physical_base;
+    struct okl4_virtmem_item virtmem_item;
+    okl4_kcap_t cap;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+};
+
+
+
+
+/**
+
+*/
+
+struct okl4_shared_buffers_array {
+    __ptr64(struct okl4_shared_buffer *, buffers);
+    okl4_count_t num_buffers;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+};
+
+
+
+
+
+typedef okl4_kcap_t okl4_signal_t;
+
+
+
+
+
+
+
+
+/**
+    The `okl4_sregister_t` type represents a signed, machine-native
+    register-sized integer value.
+*/
+
+typedef int64_t okl4_sregister_t;
+
+
+
+
+
+typedef uint64_t okl4_ticks_t;
+
+
+
+
+/**
+    - BIT 0 -   @ref OKL4_MASK_ACTIVE_TIMER_FLAGS
+    - BIT 1 -   @ref OKL4_MASK_PERIODIC_TIMER_FLAGS
+    - BIT 2 -   @ref OKL4_MASK_ABSOLUTE_TIMER_FLAGS
+    - BIT 3 -   @ref OKL4_MASK_UNITS_TIMER_FLAGS
+    - BIT 4 -   @ref OKL4_MASK_ALIGN_TIMER_FLAGS
+    - BIT 5 -   @ref OKL4_MASK_WATCHDOG_TIMER_FLAGS
+    - BIT 30 -   @ref OKL4_MASK_RELOAD_TIMER_FLAGS
+    - BIT 31 -   @ref OKL4_MASK_TIMESLICE_TIMER_FLAGS
+*/
+
+/*lint -esym(621, okl4_timer_flags_t) */
+typedef uint32_t okl4_timer_flags_t;
+
+/*lint -esym(621, okl4_timer_flags_getactive) */
+/*lint -esym(714, okl4_timer_flags_getactive) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getactive(const okl4_timer_flags_t *x);
+
+/*lint -esym(621, okl4_timer_flags_setactive) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setactive(okl4_timer_flags_t *x, okl4_bool_t _active);
+
+/*lint -esym(621, okl4_timer_flags_getperiodic) */
+/*lint -esym(714, okl4_timer_flags_getperiodic) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getperiodic(const okl4_timer_flags_t *x);
+
+/*lint -esym(621, okl4_timer_flags_setperiodic) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setperiodic(okl4_timer_flags_t *x, okl4_bool_t _periodic);
+
+/*lint -esym(621, okl4_timer_flags_getabsolute) */
+/*lint -esym(714, okl4_timer_flags_getabsolute) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getabsolute(const okl4_timer_flags_t *x);
+
+/*lint -esym(621, okl4_timer_flags_setabsolute) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setabsolute(okl4_timer_flags_t *x, okl4_bool_t _absolute);
+
+/*lint -esym(621, okl4_timer_flags_getunits) */
+/*lint -esym(714, okl4_timer_flags_getunits) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getunits(const okl4_timer_flags_t *x);
+
+/*lint -esym(621, okl4_timer_flags_setunits) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setunits(okl4_timer_flags_t *x, okl4_bool_t _units);
+
+/*lint -esym(621, okl4_timer_flags_getalign) */
+/*lint -esym(714, okl4_timer_flags_getalign) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getalign(const okl4_timer_flags_t *x);
+
+/*lint -esym(621, okl4_timer_flags_setalign) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setalign(okl4_timer_flags_t *x, okl4_bool_t _align);
+
+/*lint -esym(621, okl4_timer_flags_getwatchdog) */
+/*lint -esym(714, okl4_timer_flags_getwatchdog) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getwatchdog(const okl4_timer_flags_t *x);
+
+/*lint -esym(621, okl4_timer_flags_setwatchdog) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setwatchdog(okl4_timer_flags_t *x, okl4_bool_t _watchdog);
+
+/*lint -esym(621, okl4_timer_flags_getreload) */
+/*lint -esym(714, okl4_timer_flags_getreload) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getreload(const okl4_timer_flags_t *x);
+
+/*lint -esym(621, okl4_timer_flags_setreload) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setreload(okl4_timer_flags_t *x, okl4_bool_t _reload);
+
+/*lint -esym(621, okl4_timer_flags_gettimeslice) */
+/*lint -esym(714, okl4_timer_flags_gettimeslice) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_gettimeslice(const okl4_timer_flags_t *x);
+
+/*lint -esym(621, okl4_timer_flags_settimeslice) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_settimeslice(okl4_timer_flags_t *x, okl4_bool_t _timeslice);
+
+/*lint -esym(714, okl4_timer_flags_init) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_init(okl4_timer_flags_t *x);
+
+/*lint -esym(714, okl4_timer_flags_cast) */
+OKL4_FORCE_INLINE okl4_timer_flags_t
+okl4_timer_flags_cast(uint32_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_TIMER_FLAGS_ACTIVE_MASK) */
+#define OKL4_TIMER_FLAGS_ACTIVE_MASK ((okl4_timer_flags_t)1U) /* Deprecated */
+/** Indicates that the timer has a timeout set */
+/*lint -esym(621, OKL4_MASK_ACTIVE_TIMER_FLAGS) */
+#define OKL4_MASK_ACTIVE_TIMER_FLAGS ((okl4_timer_flags_t)1U)
+/*lint -esym(621, OKL4_SHIFT_ACTIVE_TIMER_FLAGS) */
+#define OKL4_SHIFT_ACTIVE_TIMER_FLAGS (0)
+/*lint -esym(621, OKL4_WIDTH_ACTIVE_TIMER_FLAGS) */
+#define OKL4_WIDTH_ACTIVE_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_TIMER_FLAGS_PERIODIC_MASK) */
+#define OKL4_TIMER_FLAGS_PERIODIC_MASK ((okl4_timer_flags_t)1U << 1) /* Deprecated */
+/** Indicates that the timer is periodic, otherwise it is one-shot */
+/*lint -esym(621, OKL4_MASK_PERIODIC_TIMER_FLAGS) */
+#define OKL4_MASK_PERIODIC_TIMER_FLAGS ((okl4_timer_flags_t)1U << 1)
+/*lint -esym(621, OKL4_SHIFT_PERIODIC_TIMER_FLAGS) */
+#define OKL4_SHIFT_PERIODIC_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_WIDTH_PERIODIC_TIMER_FLAGS) */
+#define OKL4_WIDTH_PERIODIC_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_TIMER_FLAGS_ABSOLUTE_MASK) */
+#define OKL4_TIMER_FLAGS_ABSOLUTE_MASK ((okl4_timer_flags_t)1U << 2) /* Deprecated */
+/** Indicates that the timeout value is absolute, otherwise it is relative */
+/*lint -esym(621, OKL4_MASK_ABSOLUTE_TIMER_FLAGS) */
+#define OKL4_MASK_ABSOLUTE_TIMER_FLAGS ((okl4_timer_flags_t)1U << 2)
+/*lint -esym(621, OKL4_SHIFT_ABSOLUTE_TIMER_FLAGS) */
+#define OKL4_SHIFT_ABSOLUTE_TIMER_FLAGS (2)
+/*lint -esym(621, OKL4_WIDTH_ABSOLUTE_TIMER_FLAGS) */
+#define OKL4_WIDTH_ABSOLUTE_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_TIMER_FLAGS_UNITS_MASK) */
+#define OKL4_TIMER_FLAGS_UNITS_MASK ((okl4_timer_flags_t)1U << 3) /* Deprecated */
+/** Select time in UNITS of raw ticks */
+/*lint -esym(621, OKL4_MASK_UNITS_TIMER_FLAGS) */
+#define OKL4_MASK_UNITS_TIMER_FLAGS ((okl4_timer_flags_t)1U << 3)
+/*lint -esym(621, OKL4_SHIFT_UNITS_TIMER_FLAGS) */
+#define OKL4_SHIFT_UNITS_TIMER_FLAGS (3)
+/*lint -esym(621, OKL4_WIDTH_UNITS_TIMER_FLAGS) */
+#define OKL4_WIDTH_UNITS_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_TIMER_FLAGS_ALIGN_MASK) */
+#define OKL4_TIMER_FLAGS_ALIGN_MASK ((okl4_timer_flags_t)1U << 4) /* Deprecated */
+/** Align first timeout of a periodic timer to a multiple of the timeout length */
+/*lint -esym(621, OKL4_MASK_ALIGN_TIMER_FLAGS) */
+#define OKL4_MASK_ALIGN_TIMER_FLAGS ((okl4_timer_flags_t)1U << 4)
+/*lint -esym(621, OKL4_SHIFT_ALIGN_TIMER_FLAGS) */
+#define OKL4_SHIFT_ALIGN_TIMER_FLAGS (4)
+/*lint -esym(621, OKL4_WIDTH_ALIGN_TIMER_FLAGS) */
+#define OKL4_WIDTH_ALIGN_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_TIMER_FLAGS_WATCHDOG_MASK) */
+#define OKL4_TIMER_FLAGS_WATCHDOG_MASK ((okl4_timer_flags_t)1U << 5) /* Deprecated */
+/** Enter the kernel interactive debugger on timer expiry (no effect for production builds of the kernel) */
+/*lint -esym(621, OKL4_MASK_WATCHDOG_TIMER_FLAGS) */
+#define OKL4_MASK_WATCHDOG_TIMER_FLAGS ((okl4_timer_flags_t)1U << 5)
+/*lint -esym(621, OKL4_SHIFT_WATCHDOG_TIMER_FLAGS) */
+#define OKL4_SHIFT_WATCHDOG_TIMER_FLAGS (5)
+/*lint -esym(621, OKL4_WIDTH_WATCHDOG_TIMER_FLAGS) */
+#define OKL4_WIDTH_WATCHDOG_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_TIMER_FLAGS_RELOAD_MASK) */
+#define OKL4_TIMER_FLAGS_RELOAD_MASK ((okl4_timer_flags_t)1U << 30) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_RELOAD_TIMER_FLAGS) */
+#define OKL4_MASK_RELOAD_TIMER_FLAGS ((okl4_timer_flags_t)1U << 30)
+/*lint -esym(621, OKL4_SHIFT_RELOAD_TIMER_FLAGS) */
+#define OKL4_SHIFT_RELOAD_TIMER_FLAGS (30)
+/*lint -esym(621, OKL4_WIDTH_RELOAD_TIMER_FLAGS) */
+#define OKL4_WIDTH_RELOAD_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_TIMER_FLAGS_TIMESLICE_MASK) */
+#define OKL4_TIMER_FLAGS_TIMESLICE_MASK ((okl4_timer_flags_t)1U << 31) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_TIMESLICE_TIMER_FLAGS) */
+#define OKL4_MASK_TIMESLICE_TIMER_FLAGS ((okl4_timer_flags_t)1U << 31)
+/*lint -esym(621, OKL4_SHIFT_TIMESLICE_TIMER_FLAGS) */
+#define OKL4_SHIFT_TIMESLICE_TIMER_FLAGS (31)
+/*lint -esym(621, OKL4_WIDTH_TIMESLICE_TIMER_FLAGS) */
+#define OKL4_WIDTH_TIMESLICE_TIMER_FLAGS (1)
+
+
+/*lint -sem(okl4_timer_flags_getactive, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_timer_flags_getactive) */
+/*lint -esym(714, okl4_timer_flags_getactive) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getactive(const okl4_timer_flags_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            _Bool field : 1;
+        } bits;
+        okl4_timer_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_timer_flags_setactive, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_timer_flags_setactive) */
+
+/*lint -esym(621, okl4_timer_flags_setactive) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setactive(okl4_timer_flags_t *x, okl4_bool_t _active)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            _Bool field : 1;
+        } bits;
+        okl4_timer_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_active;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_timer_flags_getperiodic, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_timer_flags_getperiodic) */
+/*lint -esym(714, okl4_timer_flags_getperiodic) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getperiodic(const okl4_timer_flags_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 1;
+            _Bool field : 1;
+        } bits;
+        okl4_timer_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_timer_flags_setperiodic, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_timer_flags_setperiodic) */
+
+/*lint -esym(621, okl4_timer_flags_setperiodic) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setperiodic(okl4_timer_flags_t *x, okl4_bool_t _periodic)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 1;
+            _Bool field : 1;
+        } bits;
+        okl4_timer_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_periodic;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_timer_flags_getabsolute, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_timer_flags_getabsolute) */
+/*lint -esym(714, okl4_timer_flags_getabsolute) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getabsolute(const okl4_timer_flags_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 2;
+            _Bool field : 1;
+        } bits;
+        okl4_timer_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_timer_flags_setabsolute, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_timer_flags_setabsolute) */
+
+/*lint -esym(621, okl4_timer_flags_setabsolute) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setabsolute(okl4_timer_flags_t *x, okl4_bool_t _absolute)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 2;
+            _Bool field : 1;
+        } bits;
+        okl4_timer_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_absolute;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_timer_flags_getunits, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_timer_flags_getunits) */
+/*lint -esym(714, okl4_timer_flags_getunits) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getunits(const okl4_timer_flags_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 3;
+            _Bool field : 1;
+        } bits;
+        okl4_timer_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_timer_flags_setunits, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_timer_flags_setunits) */
+
+/*lint -esym(621, okl4_timer_flags_setunits) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setunits(okl4_timer_flags_t *x, okl4_bool_t _units)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 3;
+            _Bool field : 1;
+        } bits;
+        okl4_timer_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_units;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_timer_flags_getalign, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_timer_flags_getalign) */
+/*lint -esym(714, okl4_timer_flags_getalign) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getalign(const okl4_timer_flags_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 4;
+            _Bool field : 1;
+        } bits;
+        okl4_timer_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_timer_flags_setalign, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_timer_flags_setalign) */
+
+/*lint -esym(621, okl4_timer_flags_setalign) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setalign(okl4_timer_flags_t *x, okl4_bool_t _align)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 4;
+            _Bool field : 1;
+        } bits;
+        okl4_timer_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_align;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_timer_flags_getwatchdog, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_timer_flags_getwatchdog) */
+/*lint -esym(714, okl4_timer_flags_getwatchdog) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getwatchdog(const okl4_timer_flags_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 5;
+            _Bool field : 1;
+        } bits;
+        okl4_timer_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_timer_flags_setwatchdog, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_timer_flags_setwatchdog) */
+
+/*lint -esym(621, okl4_timer_flags_setwatchdog) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setwatchdog(okl4_timer_flags_t *x, okl4_bool_t _watchdog)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 5;
+            _Bool field : 1;
+        } bits;
+        okl4_timer_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_watchdog;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_timer_flags_getreload, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_timer_flags_getreload) */
+/*lint -esym(714, okl4_timer_flags_getreload) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getreload(const okl4_timer_flags_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 30;
+            _Bool field : 1;
+        } bits;
+        okl4_timer_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_timer_flags_setreload, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_timer_flags_setreload) */
+
+/*lint -esym(621, okl4_timer_flags_setreload) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setreload(okl4_timer_flags_t *x, okl4_bool_t _reload)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 30;
+            _Bool field : 1;
+        } bits;
+        okl4_timer_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_reload;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_timer_flags_gettimeslice, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_timer_flags_gettimeslice) */
+/*lint -esym(714, okl4_timer_flags_gettimeslice) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_gettimeslice(const okl4_timer_flags_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 31;
+            _Bool field : 1;
+        } bits;
+        okl4_timer_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_timer_flags_settimeslice, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_timer_flags_settimeslice) */
+
+/*lint -esym(621, okl4_timer_flags_settimeslice) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_settimeslice(okl4_timer_flags_t *x, okl4_bool_t _timeslice)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 31;
+            _Bool field : 1;
+        } bits;
+        okl4_timer_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_timeslice;
+    *x = _conv.raw;
+}
+/*lint -esym(714, okl4_timer_flags_init) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_init(okl4_timer_flags_t *x)
+{
+    *x = (okl4_timer_flags_t)0U;
+}
+
+/*lint -esym(714, okl4_timer_flags_cast) */
+OKL4_FORCE_INLINE okl4_timer_flags_t
+okl4_timer_flags_cast(uint32_t p, okl4_bool_t force)
+{
+    okl4_timer_flags_t x = (okl4_timer_flags_t)p;
+    (void)force;
+    return x;
+}
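+
+/*
+ * Illustrative usage sketch (not part of the generated interface): building a
+ * flags word for an active, periodic timer with a relative timeout.  UNITS is
+ * left clear, so the timeout is not expressed in raw ticks.  The helper name
+ * is hypothetical.
+ */
+OKL4_FORCE_INLINE okl4_timer_flags_t
+okl4_example_periodic_timer_flags(void)
+{
+    okl4_timer_flags_t flags;
+
+    okl4_timer_flags_init(&flags);
+    okl4_timer_flags_setactive(&flags, (okl4_bool_t)1U);
+    okl4_timer_flags_setperiodic(&flags, (okl4_bool_t)1U);
+    /* ABSOLUTE is left clear: the timeout is interpreted as relative. */
+    return flags;
+}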
+
+
+
+
+struct _okl4_tracebuffer_buffer_header {
+    okl4_soc_time_t timestamp;
+    okl4_count_t wrap;
+    _okl4_padding_t __padding0_4; /**< Padding 8 */
+    _okl4_padding_t __padding1_5; /**< Padding 8 */
+    _okl4_padding_t __padding2_6; /**< Padding 8 */
+    _okl4_padding_t __padding3_7; /**< Padding 8 */
+    okl4_ksize_t size;
+    okl4_ksize_t head;
+    okl4_ksize_t offset;
+};
+
+
+
+
+
+
+/**
+
+*/
+
+struct okl4_tracebuffer_env {
+    struct okl4_virtmem_item virt;
+    okl4_interrupt_number_t virq;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+};
+
+
+
+
+
+struct _okl4_tracebuffer_header {
+    uint32_t magic;
+    uint32_t version;
+    uint32_t id;
+    okl4_count_t num_buffers;
+    okl4_ksize_t buffer_size;
+    okl4_atomic_uint32_t log_mask;
+    okl4_atomic_uint32_t active_buffer;
+    okl4_atomic_uint32_t grabbed_buffer;
+    okl4_atomic_uint32_t empty_buffers;
+    struct _okl4_tracebuffer_buffer_header buffers[]; /*lint --e{9038} flex array */
+};
+
+
+
+
+
+
+
+typedef uint32_t okl4_tracepoint_class_t;
+
+/*lint -esym(621, OKL4_TRACEPOINT_CLASS_THREAD_STATE) */
+#define OKL4_TRACEPOINT_CLASS_THREAD_STATE ((okl4_tracepoint_class_t)0x0U)
+/*lint -esym(621, OKL4_TRACEPOINT_CLASS_SYSCALLS) */
+#define OKL4_TRACEPOINT_CLASS_SYSCALLS ((okl4_tracepoint_class_t)0x1U)
+/*lint -esym(621, OKL4_TRACEPOINT_CLASS_PRIMARY) */
+#define OKL4_TRACEPOINT_CLASS_PRIMARY ((okl4_tracepoint_class_t)0x2U)
+/*lint -esym(621, OKL4_TRACEPOINT_CLASS_SECONDARY) */
+#define OKL4_TRACEPOINT_CLASS_SECONDARY ((okl4_tracepoint_class_t)0x3U)
+/*lint -esym(621, OKL4_TRACEPOINT_CLASS_TERTIARY) */
+#define OKL4_TRACEPOINT_CLASS_TERTIARY ((okl4_tracepoint_class_t)0x4U)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_TRACEPOINT_CLASS_MAX) */
+#define OKL4_TRACEPOINT_CLASS_MAX ((okl4_tracepoint_class_t)0x4U)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_TRACEPOINT_CLASS_INVALID) */
+#define OKL4_TRACEPOINT_CLASS_INVALID ((okl4_tracepoint_class_t)0xffffffffU)
+
+/*lint -esym(714, okl4_tracepoint_class_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_tracepoint_class_is_element_of(okl4_tracepoint_class_t var);
+
+
+/*lint -esym(714, okl4_tracepoint_class_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_tracepoint_class_is_element_of(okl4_tracepoint_class_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((var == OKL4_TRACEPOINT_CLASS_THREAD_STATE) ||
+            (var == OKL4_TRACEPOINT_CLASS_SYSCALLS) ||
+            (var == OKL4_TRACEPOINT_CLASS_PRIMARY) ||
+            (var == OKL4_TRACEPOINT_CLASS_SECONDARY) ||
+            (var == OKL4_TRACEPOINT_CLASS_TERTIARY));
+}
+
+
+/**
+    - BITS 7..0 -   @ref _OKL4_MASK_ID_TRACEPOINT_DESC
+    - BIT 8 -   @ref _OKL4_MASK_USER_TRACEPOINT_DESC
+    - BIT 9 -   @ref _OKL4_MASK_BIN_TRACEPOINT_DESC
+    - BITS 15..10 -   @ref _OKL4_MASK_RECLEN_TRACEPOINT_DESC
+    - BITS 21..16 -   @ref _OKL4_MASK_CPUID_TRACEPOINT_DESC
+    - BITS 27..22 -   @ref _OKL4_MASK_THREADID_TRACEPOINT_DESC
+    - BITS 31..28 -   @ref _OKL4_MASK__R1_TRACEPOINT_DESC
+*/
+
+/*lint -esym(621, _okl4_tracepoint_desc_t) */
+typedef uint32_t _okl4_tracepoint_desc_t;
+
+/*lint -esym(621, _okl4_tracepoint_desc_getid) */
+/*lint -esym(714, _okl4_tracepoint_desc_getid) */
+OKL4_FORCE_INLINE uint32_t
+_okl4_tracepoint_desc_getid(const _okl4_tracepoint_desc_t *x);
+
+/*lint -esym(621, _okl4_tracepoint_desc_setid) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setid(_okl4_tracepoint_desc_t *x, uint32_t _id);
+
+/*lint -esym(621, _okl4_tracepoint_desc_getuser) */
+/*lint -esym(714, _okl4_tracepoint_desc_getuser) */
+OKL4_FORCE_INLINE okl4_bool_t
+_okl4_tracepoint_desc_getuser(const _okl4_tracepoint_desc_t *x);
+
+/*lint -esym(621, _okl4_tracepoint_desc_setuser) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setuser(_okl4_tracepoint_desc_t *x, okl4_bool_t _user);
+
+/*lint -esym(621, _okl4_tracepoint_desc_getbin) */
+/*lint -esym(714, _okl4_tracepoint_desc_getbin) */
+OKL4_FORCE_INLINE okl4_bool_t
+_okl4_tracepoint_desc_getbin(const _okl4_tracepoint_desc_t *x);
+
+/*lint -esym(621, _okl4_tracepoint_desc_setbin) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setbin(_okl4_tracepoint_desc_t *x, okl4_bool_t _bin);
+
+/*lint -esym(621, _okl4_tracepoint_desc_getreclen) */
+/*lint -esym(714, _okl4_tracepoint_desc_getreclen) */
+OKL4_FORCE_INLINE uint32_t
+_okl4_tracepoint_desc_getreclen(const _okl4_tracepoint_desc_t *x);
+
+/*lint -esym(621, _okl4_tracepoint_desc_setreclen) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setreclen(_okl4_tracepoint_desc_t *x, uint32_t _reclen);
+
+/*lint -esym(621, _okl4_tracepoint_desc_getcpuid) */
+/*lint -esym(714, _okl4_tracepoint_desc_getcpuid) */
+OKL4_FORCE_INLINE okl4_count_t
+_okl4_tracepoint_desc_getcpuid(const _okl4_tracepoint_desc_t *x);
+
+/*lint -esym(621, _okl4_tracepoint_desc_setcpuid) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setcpuid(_okl4_tracepoint_desc_t *x, okl4_count_t _cpuid);
+
+/*lint -esym(621, _okl4_tracepoint_desc_getthreadid) */
+/*lint -esym(714, _okl4_tracepoint_desc_getthreadid) */
+OKL4_FORCE_INLINE uint32_t
+_okl4_tracepoint_desc_getthreadid(const _okl4_tracepoint_desc_t *x);
+
+/*lint -esym(621, _okl4_tracepoint_desc_setthreadid) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setthreadid(_okl4_tracepoint_desc_t *x, uint32_t _threadid);
+
+/*lint -esym(621, _okl4_tracepoint_desc_getr1) */
+/*lint -esym(714, _okl4_tracepoint_desc_getr1) */
+OKL4_FORCE_INLINE uint32_t
+_okl4_tracepoint_desc_getr1(const _okl4_tracepoint_desc_t *x);
+
+/*lint -esym(621, _okl4_tracepoint_desc_setr1) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setr1(_okl4_tracepoint_desc_t *x, uint32_t __r1);
+
+/*lint -esym(714, _okl4_tracepoint_desc_init) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_init(_okl4_tracepoint_desc_t *x);
+
+/*lint -esym(714, _okl4_tracepoint_desc_cast) */
+OKL4_FORCE_INLINE _okl4_tracepoint_desc_t
+_okl4_tracepoint_desc_cast(uint32_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, _OKL4_TRACEPOINT_DESC_ID_MASK) */
+#define _OKL4_TRACEPOINT_DESC_ID_MASK ((_okl4_tracepoint_desc_t)255U) /* Deprecated */
+/*lint -esym(621, _OKL4_MASK_ID_TRACEPOINT_DESC) */
+#define _OKL4_MASK_ID_TRACEPOINT_DESC ((_okl4_tracepoint_desc_t)255U)
+/*lint -esym(621, _OKL4_SHIFT_ID_TRACEPOINT_DESC) */
+#define _OKL4_SHIFT_ID_TRACEPOINT_DESC (0)
+/*lint -esym(621, _OKL4_WIDTH_ID_TRACEPOINT_DESC) */
+#define _OKL4_WIDTH_ID_TRACEPOINT_DESC (8)
+/*lint -esym(621, _OKL4_TRACEPOINT_DESC_USER_MASK) */
+#define _OKL4_TRACEPOINT_DESC_USER_MASK ((_okl4_tracepoint_desc_t)1U << 8) /* Deprecated */
+/*lint -esym(621, _OKL4_MASK_USER_TRACEPOINT_DESC) */
+#define _OKL4_MASK_USER_TRACEPOINT_DESC ((_okl4_tracepoint_desc_t)1U << 8)
+/*lint -esym(621, _OKL4_SHIFT_USER_TRACEPOINT_DESC) */
+#define _OKL4_SHIFT_USER_TRACEPOINT_DESC (8)
+/*lint -esym(621, _OKL4_WIDTH_USER_TRACEPOINT_DESC) */
+#define _OKL4_WIDTH_USER_TRACEPOINT_DESC (1)
+/*lint -esym(621, _OKL4_TRACEPOINT_DESC_BIN_MASK) */
+#define _OKL4_TRACEPOINT_DESC_BIN_MASK ((_okl4_tracepoint_desc_t)1U << 9) /* Deprecated */
+/*lint -esym(621, _OKL4_MASK_BIN_TRACEPOINT_DESC) */
+#define _OKL4_MASK_BIN_TRACEPOINT_DESC ((_okl4_tracepoint_desc_t)1U << 9)
+/*lint -esym(621, _OKL4_SHIFT_BIN_TRACEPOINT_DESC) */
+#define _OKL4_SHIFT_BIN_TRACEPOINT_DESC (9)
+/*lint -esym(621, _OKL4_WIDTH_BIN_TRACEPOINT_DESC) */
+#define _OKL4_WIDTH_BIN_TRACEPOINT_DESC (1)
+/*lint -esym(621, _OKL4_TRACEPOINT_DESC_RECLEN_MASK) */
+#define _OKL4_TRACEPOINT_DESC_RECLEN_MASK ((_okl4_tracepoint_desc_t)63U << 10) /* Deprecated */
+/*lint -esym(621, _OKL4_MASK_RECLEN_TRACEPOINT_DESC) */
+#define _OKL4_MASK_RECLEN_TRACEPOINT_DESC ((_okl4_tracepoint_desc_t)63U << 10)
+/*lint -esym(621, _OKL4_SHIFT_RECLEN_TRACEPOINT_DESC) */
+#define _OKL4_SHIFT_RECLEN_TRACEPOINT_DESC (10)
+/*lint -esym(621, _OKL4_WIDTH_RECLEN_TRACEPOINT_DESC) */
+#define _OKL4_WIDTH_RECLEN_TRACEPOINT_DESC (6)
+/*lint -esym(621, _OKL4_TRACEPOINT_DESC_CPUID_MASK) */
+#define _OKL4_TRACEPOINT_DESC_CPUID_MASK ((_okl4_tracepoint_desc_t)63U << 16) /* Deprecated */
+/*lint -esym(621, _OKL4_MASK_CPUID_TRACEPOINT_DESC) */
+#define _OKL4_MASK_CPUID_TRACEPOINT_DESC ((_okl4_tracepoint_desc_t)63U << 16)
+/*lint -esym(621, _OKL4_SHIFT_CPUID_TRACEPOINT_DESC) */
+#define _OKL4_SHIFT_CPUID_TRACEPOINT_DESC (16)
+/*lint -esym(621, _OKL4_WIDTH_CPUID_TRACEPOINT_DESC) */
+#define _OKL4_WIDTH_CPUID_TRACEPOINT_DESC (6)
+/*lint -esym(621, _OKL4_TRACEPOINT_DESC_THREADID_MASK) */
+#define _OKL4_TRACEPOINT_DESC_THREADID_MASK ((_okl4_tracepoint_desc_t)63U << 22) /* Deprecated */
+/*lint -esym(621, _OKL4_MASK_THREADID_TRACEPOINT_DESC) */
+#define _OKL4_MASK_THREADID_TRACEPOINT_DESC ((_okl4_tracepoint_desc_t)63U << 22)
+/*lint -esym(621, _OKL4_SHIFT_THREADID_TRACEPOINT_DESC) */
+#define _OKL4_SHIFT_THREADID_TRACEPOINT_DESC (22)
+/*lint -esym(621, _OKL4_WIDTH_THREADID_TRACEPOINT_DESC) */
+#define _OKL4_WIDTH_THREADID_TRACEPOINT_DESC (6)
+/*lint -esym(621, _OKL4_TRACEPOINT_DESC__R1_MASK) */
+#define _OKL4_TRACEPOINT_DESC__R1_MASK ((_okl4_tracepoint_desc_t)15U << 28) /* Deprecated */
+/*lint -esym(621, _OKL4_MASK__R1_TRACEPOINT_DESC) */
+#define _OKL4_MASK__R1_TRACEPOINT_DESC ((_okl4_tracepoint_desc_t)15U << 28)
+/*lint -esym(621, _OKL4_SHIFT__R1_TRACEPOINT_DESC) */
+#define _OKL4_SHIFT__R1_TRACEPOINT_DESC (28)
+/*lint -esym(621, _OKL4_WIDTH__R1_TRACEPOINT_DESC) */
+#define _OKL4_WIDTH__R1_TRACEPOINT_DESC (4)
+
+
+/*lint -sem(_okl4_tracepoint_desc_getid, 1p, @n >= 0 && @n <= 255) */
+/*lint -esym(621, _okl4_tracepoint_desc_getid) */
+/*lint -esym(714, _okl4_tracepoint_desc_getid) */
+OKL4_FORCE_INLINE uint32_t
+_okl4_tracepoint_desc_getid(const _okl4_tracepoint_desc_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 8;
+        } bits;
+        _okl4_tracepoint_desc_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(_okl4_tracepoint_desc_setid, 2n >= 0 && 2n <= 255) */
+/*lint -esym(714, _okl4_tracepoint_desc_setid) */
+
+/*lint -esym(621, _okl4_tracepoint_desc_setid) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setid(_okl4_tracepoint_desc_t *x, uint32_t _id)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 8;
+        } bits;
+        _okl4_tracepoint_desc_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_id;
+    *x = _conv.raw;
+}
+/*lint -sem(_okl4_tracepoint_desc_getuser, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, _okl4_tracepoint_desc_getuser) */
+/*lint -esym(714, _okl4_tracepoint_desc_getuser) */
+OKL4_FORCE_INLINE okl4_bool_t
+_okl4_tracepoint_desc_getuser(const _okl4_tracepoint_desc_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 8;
+            _Bool field : 1;
+        } bits;
+        _okl4_tracepoint_desc_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(_okl4_tracepoint_desc_setuser, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, _okl4_tracepoint_desc_setuser) */
+
+/*lint -esym(621, _okl4_tracepoint_desc_setuser) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setuser(_okl4_tracepoint_desc_t *x, okl4_bool_t _user)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 8;
+            _Bool field : 1;
+        } bits;
+        _okl4_tracepoint_desc_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_user;
+    *x = _conv.raw;
+}
+/*lint -sem(_okl4_tracepoint_desc_getbin, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, _okl4_tracepoint_desc_getbin) */
+/*lint -esym(714, _okl4_tracepoint_desc_getbin) */
+OKL4_FORCE_INLINE okl4_bool_t
+_okl4_tracepoint_desc_getbin(const _okl4_tracepoint_desc_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 9;
+            _Bool field : 1;
+        } bits;
+        _okl4_tracepoint_desc_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(_okl4_tracepoint_desc_setbin, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, _okl4_tracepoint_desc_setbin) */
+
+/*lint -esym(621, _okl4_tracepoint_desc_setbin) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setbin(_okl4_tracepoint_desc_t *x, okl4_bool_t _bin)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 9;
+            _Bool field : 1;
+        } bits;
+        _okl4_tracepoint_desc_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_bin;
+    *x = _conv.raw;
+}
+/*lint -sem(_okl4_tracepoint_desc_getreclen, 1p, @n >= 0 && @n <= 63) */
+/*lint -esym(621, _okl4_tracepoint_desc_getreclen) */
+/*lint -esym(714, _okl4_tracepoint_desc_getreclen) */
+OKL4_FORCE_INLINE uint32_t
+_okl4_tracepoint_desc_getreclen(const _okl4_tracepoint_desc_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 10;
+            uint32_t field : 6;
+        } bits;
+        _okl4_tracepoint_desc_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(_okl4_tracepoint_desc_setreclen, 2n >= 0 && 2n <= 63) */
+/*lint -esym(714, _okl4_tracepoint_desc_setreclen) */
+
+/*lint -esym(621, _okl4_tracepoint_desc_setreclen) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setreclen(_okl4_tracepoint_desc_t *x, uint32_t _reclen)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 10;
+            uint32_t field : 6;
+        } bits;
+        _okl4_tracepoint_desc_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_reclen;
+    *x = _conv.raw;
+}
+/*lint -sem(_okl4_tracepoint_desc_getcpuid, 1p, @n >= 0 && @n <= 63) */
+/*lint -esym(621, _okl4_tracepoint_desc_getcpuid) */
+/*lint -esym(714, _okl4_tracepoint_desc_getcpuid) */
+OKL4_FORCE_INLINE okl4_count_t
+_okl4_tracepoint_desc_getcpuid(const _okl4_tracepoint_desc_t *x)
+{
+    okl4_count_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 16;
+            uint32_t field : 6;
+        } bits;
+        _okl4_tracepoint_desc_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_count_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(_okl4_tracepoint_desc_setcpuid, 2n >= 0 && 2n <= 63) */
+/*lint -esym(714, _okl4_tracepoint_desc_setcpuid) */
+
+/*lint -esym(621, _okl4_tracepoint_desc_setcpuid) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setcpuid(_okl4_tracepoint_desc_t *x, okl4_count_t _cpuid)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 16;
+            uint32_t field : 6;
+        } bits;
+        _okl4_tracepoint_desc_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_cpuid;
+    *x = _conv.raw;
+}
+/*lint -sem(_okl4_tracepoint_desc_getthreadid, 1p, @n >= 0 && @n <= 63) */
+/*lint -esym(621, _okl4_tracepoint_desc_getthreadid) */
+/*lint -esym(714, _okl4_tracepoint_desc_getthreadid) */
+OKL4_FORCE_INLINE uint32_t
+_okl4_tracepoint_desc_getthreadid(const _okl4_tracepoint_desc_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 22;
+            uint32_t field : 6;
+        } bits;
+        _okl4_tracepoint_desc_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(_okl4_tracepoint_desc_setthreadid, 2n >= 0 && 2n <= 63) */
+/*lint -esym(714, _okl4_tracepoint_desc_setthreadid) */
+
+/*lint -esym(621, _okl4_tracepoint_desc_setthreadid) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setthreadid(_okl4_tracepoint_desc_t *x, uint32_t _threadid)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 22;
+            uint32_t field : 6;
+        } bits;
+        _okl4_tracepoint_desc_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_threadid;
+    *x = _conv.raw;
+}
+/*lint -sem(_okl4_tracepoint_desc_getr1, 1p, @n >= 0 && @n <= 15) */
+/*lint -esym(621, _okl4_tracepoint_desc_getr1) */
+/*lint -esym(714, _okl4_tracepoint_desc_getr1) */
+OKL4_FORCE_INLINE uint32_t
+_okl4_tracepoint_desc_getr1(const _okl4_tracepoint_desc_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 28;
+            uint32_t field : 4;
+        } bits;
+        _okl4_tracepoint_desc_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(_okl4_tracepoint_desc_setr1, 2n >= 0 && 2n <= 15) */
+/*lint -esym(714, _okl4_tracepoint_desc_setr1) */
+
+/*lint -esym(621, _okl4_tracepoint_desc_setr1) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setr1(_okl4_tracepoint_desc_t *x, uint32_t __r1)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 28;
+            uint32_t field : 4;
+        } bits;
+        _okl4_tracepoint_desc_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)__r1;
+    *x = _conv.raw;
+}
+/*lint -esym(714, _okl4_tracepoint_desc_init) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_init(_okl4_tracepoint_desc_t *x)
+{
+    *x = (_okl4_tracepoint_desc_t)0U;
+}
+
+/*lint -esym(714, _okl4_tracepoint_desc_cast) */
+OKL4_FORCE_INLINE _okl4_tracepoint_desc_t
+_okl4_tracepoint_desc_cast(uint32_t p, okl4_bool_t force)
+{
+    _okl4_tracepoint_desc_t x = (_okl4_tracepoint_desc_t)p;
+    (void)force;
+    return x;
+}
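+
+/**
+    Illustrative usage only (not part of the generated API): packing a
+    tracepoint descriptor with the accessors declared above. The field
+    values are arbitrary and exist only to show the call pattern.
+
+    @code
+    static void example_pack_desc(void)
+    {
+        _okl4_tracepoint_desc_t desc;
+
+        _okl4_tracepoint_desc_init(&desc);          // start from all-zero
+        _okl4_tracepoint_desc_setid(&desc, 0x2aU);  // 8-bit event id
+        _okl4_tracepoint_desc_setuser(&desc, (okl4_bool_t)1);
+        _okl4_tracepoint_desc_setreclen(&desc, 4U); // 6-bit record length
+        _okl4_tracepoint_desc_setcpuid(&desc, 1U);  // 6-bit CPU index
+
+        // Reading a field back uses the matching getter.
+        (void)_okl4_tracepoint_desc_getid(&desc);
+    }
+    @endcode
+*/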
+
+
+
+/**
+    - BITS 15..0 -   @ref _OKL4_MASK_CLASS_TRACEPOINT_MASKS
+    - BITS 31..16 -   @ref _OKL4_MASK_SUBSYSTEM_TRACEPOINT_MASKS
+*/
+
+/*lint -esym(621, _okl4_tracepoint_masks_t) */
+typedef uint32_t _okl4_tracepoint_masks_t;
+
+/*lint -esym(621, _okl4_tracepoint_masks_getclass) */
+/*lint -esym(714, _okl4_tracepoint_masks_getclass) */
+OKL4_FORCE_INLINE uint32_t
+_okl4_tracepoint_masks_getclass(const _okl4_tracepoint_masks_t *x);
+
+/*lint -esym(621, _okl4_tracepoint_masks_setclass) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_masks_setclass(_okl4_tracepoint_masks_t *x, uint32_t _class);
+
+/*lint -esym(621, _okl4_tracepoint_masks_getsubsystem) */
+/*lint -esym(714, _okl4_tracepoint_masks_getsubsystem) */
+OKL4_FORCE_INLINE uint32_t
+_okl4_tracepoint_masks_getsubsystem(const _okl4_tracepoint_masks_t *x);
+
+/*lint -esym(621, _okl4_tracepoint_masks_setsubsystem) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_masks_setsubsystem(_okl4_tracepoint_masks_t *x, uint32_t _subsystem);
+
+/*lint -esym(714, _okl4_tracepoint_masks_init) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_masks_init(_okl4_tracepoint_masks_t *x);
+
+/*lint -esym(714, _okl4_tracepoint_masks_cast) */
+OKL4_FORCE_INLINE _okl4_tracepoint_masks_t
+_okl4_tracepoint_masks_cast(uint32_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, _OKL4_TRACEPOINT_MASKS_CLASS_MASK) */
+#define _OKL4_TRACEPOINT_MASKS_CLASS_MASK ((_okl4_tracepoint_masks_t)65535U) /* Deprecated */
+/*lint -esym(621, _OKL4_MASK_CLASS_TRACEPOINT_MASKS) */
+#define _OKL4_MASK_CLASS_TRACEPOINT_MASKS ((_okl4_tracepoint_masks_t)65535U)
+/*lint -esym(621, _OKL4_SHIFT_CLASS_TRACEPOINT_MASKS) */
+#define _OKL4_SHIFT_CLASS_TRACEPOINT_MASKS (0)
+/*lint -esym(621, _OKL4_WIDTH_CLASS_TRACEPOINT_MASKS) */
+#define _OKL4_WIDTH_CLASS_TRACEPOINT_MASKS (16)
+/*lint -esym(621, _OKL4_TRACEPOINT_MASKS_SUBSYSTEM_MASK) */
+#define _OKL4_TRACEPOINT_MASKS_SUBSYSTEM_MASK ((_okl4_tracepoint_masks_t)65535U << 16) /* Deprecated */
+/*lint -esym(621, _OKL4_MASK_SUBSYSTEM_TRACEPOINT_MASKS) */
+#define _OKL4_MASK_SUBSYSTEM_TRACEPOINT_MASKS ((_okl4_tracepoint_masks_t)65535U << 16)
+/*lint -esym(621, _OKL4_SHIFT_SUBSYSTEM_TRACEPOINT_MASKS) */
+#define _OKL4_SHIFT_SUBSYSTEM_TRACEPOINT_MASKS (16)
+/*lint -esym(621, _OKL4_WIDTH_SUBSYSTEM_TRACEPOINT_MASKS) */
+#define _OKL4_WIDTH_SUBSYSTEM_TRACEPOINT_MASKS (16)
+
+
+/*lint -sem(_okl4_tracepoint_masks_getclass, 1p, @n >= 0 && @n <= 65535) */
+/*lint -esym(621, _okl4_tracepoint_masks_getclass) */
+/*lint -esym(714, _okl4_tracepoint_masks_getclass) */
+OKL4_FORCE_INLINE uint32_t
+_okl4_tracepoint_masks_getclass(const _okl4_tracepoint_masks_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 16;
+        } bits;
+        _okl4_tracepoint_masks_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(_okl4_tracepoint_masks_setclass, 2n >= 0 && 2n <= 65535) */
+/*lint -esym(714, _okl4_tracepoint_masks_setclass) */
+
+/*lint -esym(621, _okl4_tracepoint_masks_setclass) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_masks_setclass(_okl4_tracepoint_masks_t *x, uint32_t _class)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 16;
+        } bits;
+        _okl4_tracepoint_masks_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_class;
+    *x = _conv.raw;
+}
+/*lint -sem(_okl4_tracepoint_masks_getsubsystem, 1p, @n >= 0 && @n <= 65535) */
+/*lint -esym(621, _okl4_tracepoint_masks_getsubsystem) */
+/*lint -esym(714, _okl4_tracepoint_masks_getsubsystem) */
+OKL4_FORCE_INLINE uint32_t
+_okl4_tracepoint_masks_getsubsystem(const _okl4_tracepoint_masks_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 16;
+            uint32_t field : 16;
+        } bits;
+        _okl4_tracepoint_masks_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(_okl4_tracepoint_masks_setsubsystem, 2n >= 0 && 2n <= 65535) */
+/*lint -esym(714, _okl4_tracepoint_masks_setsubsystem) */
+
+/*lint -esym(621, _okl4_tracepoint_masks_setsubsystem) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_masks_setsubsystem(_okl4_tracepoint_masks_t *x, uint32_t _subsystem)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 16;
+            uint32_t field : 16;
+        } bits;
+        _okl4_tracepoint_masks_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_subsystem;
+    *x = _conv.raw;
+}
+/*lint -esym(714, _okl4_tracepoint_masks_init) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_masks_init(_okl4_tracepoint_masks_t *x)
+{
+    *x = (_okl4_tracepoint_masks_t)0U;
+}
+
+/*lint -esym(714, _okl4_tracepoint_masks_cast) */
+OKL4_FORCE_INLINE _okl4_tracepoint_masks_t
+_okl4_tracepoint_masks_cast(uint32_t p, okl4_bool_t force)
+{
+    _okl4_tracepoint_masks_t x = (_okl4_tracepoint_masks_t)p;
+    (void)force;
+    return x;
+}
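+
+/**
+    Illustrative usage only (not part of the generated API): the accessors
+    above pack the 16-bit class and subsystem fields into a single
+    _okl4_tracepoint_masks_t word. The values below are arbitrary.
+
+    @code
+    static void example_pack_masks(void)
+    {
+        _okl4_tracepoint_masks_t masks;
+
+        _okl4_tracepoint_masks_init(&masks);
+        _okl4_tracepoint_masks_setclass(&masks, 0x0004U);
+        _okl4_tracepoint_masks_setsubsystem(&masks, 0x0002U);
+
+        // Each getter extracts only its own 16-bit field.
+        (void)_okl4_tracepoint_masks_getsubsystem(&masks);
+    }
+    @endcode
+*/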
+
+
+
+
+struct okl4_tracepoint_entry_base {
+    uint32_t time_offset;
+    _okl4_tracepoint_masks_t masks;
+    _okl4_tracepoint_desc_t description;
+};
+
+
+
+
+
+
+
+typedef uint32_t okl4_tracepoint_evt_t;
+
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SCH_SCHEDULER_FLAG_SET_RUNNABLE) */
+#define OKL4_TRACEPOINT_EVT_SCH_SCHEDULER_FLAG_SET_RUNNABLE ((okl4_tracepoint_evt_t)0x0U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SCH_SCHEDULER_FLAG_CLEAR_RUNNABLE) */
+#define OKL4_TRACEPOINT_EVT_SCH_SCHEDULER_FLAG_CLEAR_RUNNABLE ((okl4_tracepoint_evt_t)0x1U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SCH_CONTEXT_SWITCH) */
+#define OKL4_TRACEPOINT_EVT_SCH_CONTEXT_SWITCH ((okl4_tracepoint_evt_t)0x2U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_KDB_SET_OBJECT_NAME) */
+#define OKL4_TRACEPOINT_EVT_KDB_SET_OBJECT_NAME ((okl4_tracepoint_evt_t)0x3U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_AXON_PROCESS_RECV) */
+#define OKL4_TRACEPOINT_EVT_SWI_AXON_PROCESS_RECV ((okl4_tracepoint_evt_t)0x4U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_AXON_SET_HALTED) */
+#define OKL4_TRACEPOINT_EVT_SWI_AXON_SET_HALTED ((okl4_tracepoint_evt_t)0x5U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_AXON_SET_RECV_AREA) */
+#define OKL4_TRACEPOINT_EVT_SWI_AXON_SET_RECV_AREA ((okl4_tracepoint_evt_t)0x6U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_AXON_SET_RECV_QUEUE) */
+#define OKL4_TRACEPOINT_EVT_SWI_AXON_SET_RECV_QUEUE ((okl4_tracepoint_evt_t)0x7U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_AXON_SET_RECV_SEGMENT) */
+#define OKL4_TRACEPOINT_EVT_SWI_AXON_SET_RECV_SEGMENT ((okl4_tracepoint_evt_t)0x8U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_AXON_SET_SEND_AREA) */
+#define OKL4_TRACEPOINT_EVT_SWI_AXON_SET_SEND_AREA ((okl4_tracepoint_evt_t)0x9U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_AXON_SET_SEND_QUEUE) */
+#define OKL4_TRACEPOINT_EVT_SWI_AXON_SET_SEND_QUEUE ((okl4_tracepoint_evt_t)0xaU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_AXON_SET_SEND_SEGMENT) */
+#define OKL4_TRACEPOINT_EVT_SWI_AXON_SET_SEND_SEGMENT ((okl4_tracepoint_evt_t)0xbU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_AXON_TRIGGER_SEND) */
+#define OKL4_TRACEPOINT_EVT_SWI_AXON_TRIGGER_SEND ((okl4_tracepoint_evt_t)0xcU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_ACK) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_ACK ((okl4_tracepoint_evt_t)0xdU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_PRIVATE) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_PRIVATE ((okl4_tracepoint_evt_t)0xeU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_SHARED) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_SHARED ((okl4_tracepoint_evt_t)0xfU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_DETACH) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_DETACH ((okl4_tracepoint_evt_t)0x10U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_DIST_ENABLE) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_DIST_ENABLE ((okl4_tracepoint_evt_t)0x11U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_EOI) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_EOI ((okl4_tracepoint_evt_t)0x12U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_GET_HIGHEST_PRIORITY_PENDING) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_GET_HIGHEST_PRIORITY_PENDING ((okl4_tracepoint_evt_t)0x13U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_GET_PAYLOAD) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_GET_PAYLOAD ((okl4_tracepoint_evt_t)0x14U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_LIMITS) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_LIMITS ((okl4_tracepoint_evt_t)0x15U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_MASK) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_MASK ((okl4_tracepoint_evt_t)0x16U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_RAISE) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_RAISE ((okl4_tracepoint_evt_t)0x17U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_BINARY_POINT) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_BINARY_POINT ((okl4_tracepoint_evt_t)0x18U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONFIG) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONFIG ((okl4_tracepoint_evt_t)0x19U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONTROL) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONTROL ((okl4_tracepoint_evt_t)0x1aU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY ((okl4_tracepoint_evt_t)0x1bU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY_MASK) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY_MASK ((okl4_tracepoint_evt_t)0x1cU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_TARGETS) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_TARGETS ((okl4_tracepoint_evt_t)0x1dU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_UNMASK) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_UNMASK ((okl4_tracepoint_evt_t)0x1eU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_KDB_INTERACT) */
+#define OKL4_TRACEPOINT_EVT_SWI_KDB_INTERACT ((okl4_tracepoint_evt_t)0x1fU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_KDB_SET_OBJECT_NAME) */
+#define OKL4_TRACEPOINT_EVT_SWI_KDB_SET_OBJECT_NAME ((okl4_tracepoint_evt_t)0x20U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_KSP_PROCEDURE_CALL) */
+#define OKL4_TRACEPOINT_EVT_SWI_KSP_PROCEDURE_CALL ((okl4_tracepoint_evt_t)0x21U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_ATTACH_SEGMENT) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_ATTACH_SEGMENT ((okl4_tracepoint_evt_t)0x22U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_DETACH_SEGMENT) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_DETACH_SEGMENT ((okl4_tracepoint_evt_t)0x23U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE ((okl4_tracepoint_evt_t)0x24U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE_PN) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE_PN ((okl4_tracepoint_evt_t)0x25U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PAGE) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PAGE ((okl4_tracepoint_evt_t)0x26U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PN) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PN ((okl4_tracepoint_evt_t)0x27U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_MAP_PAGE) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_MAP_PAGE ((okl4_tracepoint_evt_t)0x28U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_MAP_PN) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_MAP_PN ((okl4_tracepoint_evt_t)0x29U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_UNMAP_PAGE) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_UNMAP_PAGE ((okl4_tracepoint_evt_t)0x2aU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_UNMAP_PN) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_UNMAP_PN ((okl4_tracepoint_evt_t)0x2bU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_ATTRS) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_ATTRS ((okl4_tracepoint_evt_t)0x2cU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_PERMS) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_PERMS ((okl4_tracepoint_evt_t)0x2dU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_ATTRS) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_ATTRS ((okl4_tracepoint_evt_t)0x2eU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_PERMS) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_PERMS ((okl4_tracepoint_evt_t)0x2fU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_PERFORMANCE_NULL_SYSCALL) */
+#define OKL4_TRACEPOINT_EVT_SWI_PERFORMANCE_NULL_SYSCALL ((okl4_tracepoint_evt_t)0x30U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_PIPE_CONTROL) */
+#define OKL4_TRACEPOINT_EVT_SWI_PIPE_CONTROL ((okl4_tracepoint_evt_t)0x31U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_PIPE_RECV) */
+#define OKL4_TRACEPOINT_EVT_SWI_PIPE_RECV ((okl4_tracepoint_evt_t)0x32U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_PIPE_SEND) */
+#define OKL4_TRACEPOINT_EVT_SWI_PIPE_SEND ((okl4_tracepoint_evt_t)0x33U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_PRIORITY_WAIVE) */
+#define OKL4_TRACEPOINT_EVT_SWI_PRIORITY_WAIVE ((okl4_tracepoint_evt_t)0x34U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTER) */
+#define OKL4_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTER ((okl4_tracepoint_evt_t)0x35U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTERS) */
+#define OKL4_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTERS ((okl4_tracepoint_evt_t)0x36U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_REMOTE_READ_MEMORY32) */
+#define OKL4_TRACEPOINT_EVT_SWI_REMOTE_READ_MEMORY32 ((okl4_tracepoint_evt_t)0x37U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTER) */
+#define OKL4_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTER ((okl4_tracepoint_evt_t)0x38U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTERS) */
+#define OKL4_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTERS ((okl4_tracepoint_evt_t)0x39U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_REMOTE_WRITE_MEMORY32) */
+#define OKL4_TRACEPOINT_EVT_SWI_REMOTE_WRITE_MEMORY32 ((okl4_tracepoint_evt_t)0x3aU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_STATUS_SUSPENDED) */
+#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_STATUS_SUSPENDED ((okl4_tracepoint_evt_t)0x3bU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_WATCH_SUSPENDED) */
+#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_WATCH_SUSPENDED ((okl4_tracepoint_evt_t)0x3cU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_DISABLE) */
+#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_DISABLE ((okl4_tracepoint_evt_t)0x3dU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_ENABLE) */
+#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_ENABLE ((okl4_tracepoint_evt_t)0x3eU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_GET_DATA) */
+#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_GET_DATA ((okl4_tracepoint_evt_t)0x3fU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_DISABLE) */
+#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_DISABLE ((okl4_tracepoint_evt_t)0x40U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_ENABLE) */
+#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_ENABLE ((okl4_tracepoint_evt_t)0x41U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_GET_DATA) */
+#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_GET_DATA ((okl4_tracepoint_evt_t)0x42U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_SCHEDULER_SUSPEND) */
+#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULER_SUSPEND ((okl4_tracepoint_evt_t)0x43U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_TIMER_CANCEL) */
+#define OKL4_TRACEPOINT_EVT_SWI_TIMER_CANCEL ((okl4_tracepoint_evt_t)0x44U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_TIMER_GET_RESOLUTION) */
+#define OKL4_TRACEPOINT_EVT_SWI_TIMER_GET_RESOLUTION ((okl4_tracepoint_evt_t)0x45U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_TIMER_GET_TIME) */
+#define OKL4_TRACEPOINT_EVT_SWI_TIMER_GET_TIME ((okl4_tracepoint_evt_t)0x46U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_TIMER_QUERY) */
+#define OKL4_TRACEPOINT_EVT_SWI_TIMER_QUERY ((okl4_tracepoint_evt_t)0x47U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_TIMER_START) */
+#define OKL4_TRACEPOINT_EVT_SWI_TIMER_START ((okl4_tracepoint_evt_t)0x48U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_TRACEBUFFER_SYNC) */
+#define OKL4_TRACEPOINT_EVT_SWI_TRACEBUFFER_SYNC ((okl4_tracepoint_evt_t)0x49U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_VCPU_RESET) */
+#define OKL4_TRACEPOINT_EVT_SWI_VCPU_RESET ((okl4_tracepoint_evt_t)0x4aU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_VCPU_START) */
+#define OKL4_TRACEPOINT_EVT_SWI_VCPU_START ((okl4_tracepoint_evt_t)0x4bU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_VCPU_STOP) */
+#define OKL4_TRACEPOINT_EVT_SWI_VCPU_STOP ((okl4_tracepoint_evt_t)0x4cU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_VCPU_SWITCH_MODE) */
+#define OKL4_TRACEPOINT_EVT_SWI_VCPU_SWITCH_MODE ((okl4_tracepoint_evt_t)0x4dU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_VCPU_SYNC_SEV) */
+#define OKL4_TRACEPOINT_EVT_SWI_VCPU_SYNC_SEV ((okl4_tracepoint_evt_t)0x4eU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_VCPU_SYNC_WFE) */
+#define OKL4_TRACEPOINT_EVT_SWI_VCPU_SYNC_WFE ((okl4_tracepoint_evt_t)0x4fU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_VINTERRUPT_CLEAR_AND_RAISE) */
+#define OKL4_TRACEPOINT_EVT_SWI_VINTERRUPT_CLEAR_AND_RAISE ((okl4_tracepoint_evt_t)0x50U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_VINTERRUPT_MODIFY) */
+#define OKL4_TRACEPOINT_EVT_SWI_VINTERRUPT_MODIFY ((okl4_tracepoint_evt_t)0x51U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_VINTERRUPT_RAISE) */
+#define OKL4_TRACEPOINT_EVT_SWI_VINTERRUPT_RAISE ((okl4_tracepoint_evt_t)0x52U)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_MAX) */
+#define OKL4_TRACEPOINT_EVT_MAX ((okl4_tracepoint_evt_t)0x52U)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_INVALID) */
+#define OKL4_TRACEPOINT_EVT_INVALID ((okl4_tracepoint_evt_t)0xffffffffU)
+
+/*lint -esym(714, okl4_tracepoint_evt_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_tracepoint_evt_is_element_of(okl4_tracepoint_evt_t var);
+
+
+/*lint -esym(714, okl4_tracepoint_evt_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_tracepoint_evt_is_element_of(okl4_tracepoint_evt_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((var == OKL4_TRACEPOINT_EVT_SCH_SCHEDULER_FLAG_SET_RUNNABLE) ||
+            (var == OKL4_TRACEPOINT_EVT_SCH_SCHEDULER_FLAG_CLEAR_RUNNABLE) ||
+            (var == OKL4_TRACEPOINT_EVT_SCH_CONTEXT_SWITCH) ||
+            (var == OKL4_TRACEPOINT_EVT_KDB_SET_OBJECT_NAME) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_AXON_PROCESS_RECV) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_AXON_SET_HALTED) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_AXON_SET_RECV_AREA) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_AXON_SET_RECV_QUEUE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_AXON_SET_RECV_SEGMENT) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_AXON_SET_SEND_AREA) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_AXON_SET_SEND_QUEUE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_AXON_SET_SEND_SEGMENT) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_AXON_TRIGGER_SEND) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_ACK) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_PRIVATE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_SHARED) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_DETACH) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_DIST_ENABLE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_EOI) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_GET_HIGHEST_PRIORITY_PENDING) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_GET_PAYLOAD) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_LIMITS) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_MASK) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_RAISE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_BINARY_POINT) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONFIG) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONTROL) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY_MASK) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_TARGETS) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_UNMASK) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_KDB_INTERACT) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_KDB_SET_OBJECT_NAME) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_KSP_PROCEDURE_CALL) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_MMU_ATTACH_SEGMENT) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_MMU_DETACH_SEGMENT) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE_PN) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PAGE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PN) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_MMU_MAP_PAGE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_MMU_MAP_PN) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_MMU_UNMAP_PAGE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_MMU_UNMAP_PN) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_ATTRS) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_PERMS) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_ATTRS) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_PERMS) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_PERFORMANCE_NULL_SYSCALL) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_PIPE_CONTROL) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_PIPE_RECV) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_PIPE_SEND) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_PRIORITY_WAIVE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTER) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTERS) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_REMOTE_READ_MEMORY32) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTER) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTERS) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_REMOTE_WRITE_MEMORY32) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_STATUS_SUSPENDED) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_WATCH_SUSPENDED) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_DISABLE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_ENABLE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_GET_DATA) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_DISABLE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_ENABLE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_GET_DATA) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_SCHEDULER_SUSPEND) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_TIMER_CANCEL) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_TIMER_GET_RESOLUTION) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_TIMER_GET_TIME) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_TIMER_QUERY) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_TIMER_START) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_TRACEBUFFER_SYNC) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_VCPU_RESET) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_VCPU_START) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_VCPU_STOP) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_VCPU_SWITCH_MODE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_VCPU_SYNC_SEV) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_VCPU_SYNC_WFE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_VINTERRUPT_CLEAR_AND_RAISE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_VINTERRUPT_MODIFY) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_VINTERRUPT_RAISE));
+}
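+
+/**
+    Illustrative usage only (not part of the generated API): rejecting an
+    out-of-range event value before using it, for example when decoding a
+    raw value read from a trace buffer. The raw_value parameter is a
+    placeholder, not an existing symbol.
+
+    @code
+    static okl4_tracepoint_evt_t example_check_evt(uint32_t raw_value)
+    {
+        okl4_tracepoint_evt_t evt = (okl4_tracepoint_evt_t)raw_value;
+
+        if (!okl4_tracepoint_evt_is_element_of(evt)) {
+            evt = OKL4_TRACEPOINT_EVT_INVALID;  // not a known event
+        }
+        return evt;
+    }
+    @endcode
+*/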
+
+
+
+typedef uint32_t okl4_tracepoint_level_t;
+
+/*lint -esym(621, OKL4_TRACEPOINT_LEVEL_DEBUG) */
+#define OKL4_TRACEPOINT_LEVEL_DEBUG ((okl4_tracepoint_level_t)0x0U)
+/*lint -esym(621, OKL4_TRACEPOINT_LEVEL_INFO) */
+#define OKL4_TRACEPOINT_LEVEL_INFO ((okl4_tracepoint_level_t)0x1U)
+/*lint -esym(621, OKL4_TRACEPOINT_LEVEL_WARN) */
+#define OKL4_TRACEPOINT_LEVEL_WARN ((okl4_tracepoint_level_t)0x2U)
+/*lint -esym(621, OKL4_TRACEPOINT_LEVEL_CRITICAL) */
+#define OKL4_TRACEPOINT_LEVEL_CRITICAL ((okl4_tracepoint_level_t)0x3U)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_TRACEPOINT_LEVEL_MAX) */
+#define OKL4_TRACEPOINT_LEVEL_MAX ((okl4_tracepoint_level_t)0x3U)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_TRACEPOINT_LEVEL_INVALID) */
+#define OKL4_TRACEPOINT_LEVEL_INVALID ((okl4_tracepoint_level_t)0xffffffffU)
+
+/*lint -esym(714, okl4_tracepoint_level_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_tracepoint_level_is_element_of(okl4_tracepoint_level_t var);
+
+
+/*lint -esym(714, okl4_tracepoint_level_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_tracepoint_level_is_element_of(okl4_tracepoint_level_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((var == OKL4_TRACEPOINT_LEVEL_DEBUG) ||
+            (var == OKL4_TRACEPOINT_LEVEL_INFO) ||
+            (var == OKL4_TRACEPOINT_LEVEL_WARN) ||
+            (var == OKL4_TRACEPOINT_LEVEL_CRITICAL));
+}
+
+
+
+typedef uint32_t okl4_tracepoint_mask_t;
+
+
+
+
+
+typedef uint32_t okl4_tracepoint_subsystem_t;
+
+/*lint -esym(621, OKL4_TRACEPOINT_SUBSYSTEM_SCHEDULER) */
+#define OKL4_TRACEPOINT_SUBSYSTEM_SCHEDULER ((okl4_tracepoint_subsystem_t)0x0U)
+/*lint -esym(621, OKL4_TRACEPOINT_SUBSYSTEM_TRACE) */
+#define OKL4_TRACEPOINT_SUBSYSTEM_TRACE ((okl4_tracepoint_subsystem_t)0x1U)
+/*lint -esym(621, OKL4_TRACEPOINT_SUBSYSTEM_CORE) */
+#define OKL4_TRACEPOINT_SUBSYSTEM_CORE ((okl4_tracepoint_subsystem_t)0x2U)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_TRACEPOINT_SUBSYSTEM_MAX) */
+#define OKL4_TRACEPOINT_SUBSYSTEM_MAX ((okl4_tracepoint_subsystem_t)0x2U)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_TRACEPOINT_SUBSYSTEM_INVALID) */
+#define OKL4_TRACEPOINT_SUBSYSTEM_INVALID ((okl4_tracepoint_subsystem_t)0xffffffffU)
+
+/*lint -esym(714, okl4_tracepoint_subsystem_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_tracepoint_subsystem_is_element_of(okl4_tracepoint_subsystem_t var);
+
+
+/*lint -esym(714, okl4_tracepoint_subsystem_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_tracepoint_subsystem_is_element_of(okl4_tracepoint_subsystem_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((var == OKL4_TRACEPOINT_SUBSYSTEM_SCHEDULER) ||
+            (var == OKL4_TRACEPOINT_SUBSYSTEM_TRACE) ||
+            (var == OKL4_TRACEPOINT_SUBSYSTEM_CORE));
+}
+
+
+
+struct okl4_tracepoint_unpacked_entry {
+    struct okl4_tracepoint_entry_base entry;
+    uint32_t data[]; /*lint --e{9038} flex array */
+};
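+
+/**
+    Illustrative usage only (not part of the generated API): reading the
+    fixed header of an unpacked trace entry with the descriptor and mask
+    getters declared above. How the trailing data[] words are interpreted
+    depends on the event and is not shown here.
+
+    @code
+    static void example_read_entry(
+            const struct okl4_tracepoint_unpacked_entry *entry)
+    {
+        uint32_t id  = _okl4_tracepoint_desc_getid(&entry->entry.description);
+        uint32_t len = _okl4_tracepoint_desc_getreclen(&entry->entry.description);
+        uint32_t cls = _okl4_tracepoint_masks_getclass(&entry->entry.masks);
+
+        (void)id; (void)len; (void)cls;
+    }
+    @endcode
+*/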
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+/**
+    The okl4_vclient_info structure describes a virtual client: the Axon
+    endpoint data for the client and an opaque pointer associated with it.
+*/
+
+struct okl4_vclient_info {
+    struct okl4_axon_ep_data axon_ep;
+    __ptr64(void *, opaque);
+};
+
+
+
+
+/**
+    The okl4_vcpu_entry structure describes a single virtual CPU: its vCPU
+    capability, an IPI capability, an interrupt number and a stack pointer.
+*/
+
+struct okl4_vcpu_entry {
+    okl4_kcap_t vcpu;
+    okl4_kcap_t ipi;
+    okl4_interrupt_number_t irq;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+    okl4_register_t stack_pointer;
+};
+
+
+
+
+
+typedef okl4_arm_mpidr_t okl4_vcpu_id_t;
+
+
+
+
+/**
+    The okl4_vcpu_table structure holds the number of virtual CPUs and a
+    pointer to the array of their okl4_vcpu_entry records.
+*/
+
+struct okl4_vcpu_table {
+    okl4_count_t num_vcpus;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+    __ptr64(struct okl4_vcpu_entry *, vcpu);
+};
+
+
+
+
+/**
+    The okl4_vfp_ctrl_registers object represents the set of control
+    registers in the ARM VFP unit.
+*/
+
+struct okl4_vfp_ctrl_registers {
+    uint32_t fpsr;
+    uint32_t fpcr;
+};
+
+
+
+
+
+
+/**
+    The okl4_vfp_ctrl_registers_t type represents the set of control
+    registers in the ARM VFP unit on the native machine.
+*/
+
+typedef struct okl4_vfp_ctrl_registers okl4_vfp_ctrl_registers_t;
+
+
+
+
+/**
+    The okl4_vfp_ops_t object represents the set of operations that may be
+    performed on the ARM VFP unit.
+
+    - @ref OKL4_VFP_OPS_MAX
+    - @ref OKL4_VFP_OPS_INVALID
+*/
+
+typedef uint32_t okl4_vfp_ops_t;
+
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_VFP_OPS_MAX) */
+#define OKL4_VFP_OPS_MAX ((okl4_vfp_ops_t)0x0U)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_VFP_OPS_INVALID) */
+#define OKL4_VFP_OPS_INVALID ((okl4_vfp_ops_t)0xffffffffU)
+
+/*lint -esym(714, okl4_vfp_ops_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_vfp_ops_is_element_of(okl4_vfp_ops_t var);
+
+
+/*lint -esym(714, okl4_vfp_ops_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_vfp_ops_is_element_of(okl4_vfp_ops_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((okl4_bool_t)0);
+}
+
+
+
+struct okl4_vfp_register {
+    __attribute__((aligned(16))) uint8_t __bytes[16];
+};
+
+
+
+
+
+
+
+typedef struct okl4_vfp_register okl4_vfp_register_t;
+
+
+
+
+/**
+    The okl4_vfp_registers object represents the set of registers in the
+    ARM VFP unit, including the control registers.
+*/
+
+struct okl4_vfp_registers {
+    okl4_vfp_register_t v0;
+    okl4_vfp_register_t v1;
+    okl4_vfp_register_t v2;
+    okl4_vfp_register_t v3;
+    okl4_vfp_register_t v4;
+    okl4_vfp_register_t v5;
+    okl4_vfp_register_t v6;
+    okl4_vfp_register_t v7;
+    okl4_vfp_register_t v8;
+    okl4_vfp_register_t v9;
+    okl4_vfp_register_t v10;
+    okl4_vfp_register_t v11;
+    okl4_vfp_register_t v12;
+    okl4_vfp_register_t v13;
+    okl4_vfp_register_t v14;
+    okl4_vfp_register_t v15;
+    okl4_vfp_register_t v16;
+    okl4_vfp_register_t v17;
+    okl4_vfp_register_t v18;
+    okl4_vfp_register_t v19;
+    okl4_vfp_register_t v20;
+    okl4_vfp_register_t v21;
+    okl4_vfp_register_t v22;
+    okl4_vfp_register_t v23;
+    okl4_vfp_register_t v24;
+    okl4_vfp_register_t v25;
+    okl4_vfp_register_t v26;
+    okl4_vfp_register_t v27;
+    okl4_vfp_register_t v28;
+    okl4_vfp_register_t v29;
+    okl4_vfp_register_t v30;
+    okl4_vfp_register_t v31;
+    struct okl4_vfp_ctrl_registers control;
+    _okl4_padding_t __padding0_8; /**< Padding 16 */
+    _okl4_padding_t __padding1_9; /**< Padding 16 */
+    _okl4_padding_t __padding2_10; /**< Padding 16 */
+    _okl4_padding_t __padding3_11; /**< Padding 16 */
+    _okl4_padding_t __padding4_12; /**< Padding 16 */
+    _okl4_padding_t __padding5_13; /**< Padding 16 */
+    _okl4_padding_t __padding6_14; /**< Padding 16 */
+    _okl4_padding_t __padding7_15; /**< Padding 16 */
+};
+
+
+
+
+
+
+/**
+    The okl4_vfp_registers_t type represents a set of VFP registers on
+    the native machine.
+*/
+
+typedef struct okl4_vfp_registers okl4_vfp_registers_t;
+
+
+
+
+/**
+
+*/
+
+struct okl4_virtmem_pool {
+    struct okl4_virtmem_item pool;
+};
+
+
+
+
+/**
+    The okl4_virtual_interrupt_lines structure holds the number of virtual
+    interrupt lines and a pointer to the array of their capabilities.
+*/
+
+struct okl4_virtual_interrupt_lines {
+    okl4_count_t num_lines;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+    __ptr64(okl4_kcap_t *, lines);
+};
+
+
+
+
+/**
+
+*/
+
+struct okl4_vserver_info {
+    struct {
+        __ptr64(struct okl4_axon_ep_data *, data);
+        okl4_count_t max_messages;
+        _okl4_padding_t __padding0_4; /**< Padding 8 */
+        _okl4_padding_t __padding1_5; /**< Padding 8 */
+        _okl4_padding_t __padding2_6; /**< Padding 8 */
+        _okl4_padding_t __padding3_7; /**< Padding 8 */
+        okl4_ksize_t message_size;
+    } channels;
+
+    okl4_count_t num_clients;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+};
+
+
+
+
+/**
+    The okl4_vservices_service_descriptor structure names a VServices
+    service and its protocol; the third field is reserved.
+*/
+
+struct okl4_vservices_service_descriptor {
+    __ptr64(okl4_string_t, name);
+    __ptr64(okl4_string_t, protocol);
+    __ptr64(void *, RESERVED);
+};
+
+
+
+
+
+typedef uint32_t okl4_vservices_transport_type_t;
+
+/*lint -esym(621, OKL4_VSERVICES_TRANSPORT_TYPE_AXON) */
+#define OKL4_VSERVICES_TRANSPORT_TYPE_AXON ((okl4_vservices_transport_type_t)0x0U)
+/*lint -esym(621, OKL4_VSERVICES_TRANSPORT_TYPE_SHARED_BUFFER) */
+#define OKL4_VSERVICES_TRANSPORT_TYPE_SHARED_BUFFER ((okl4_vservices_transport_type_t)0x1U)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_VSERVICES_TRANSPORT_TYPE_MAX) */
+#define OKL4_VSERVICES_TRANSPORT_TYPE_MAX ((okl4_vservices_transport_type_t)0x1U)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_VSERVICES_TRANSPORT_TYPE_INVALID) */
+#define OKL4_VSERVICES_TRANSPORT_TYPE_INVALID ((okl4_vservices_transport_type_t)0xffffffffU)
+
+/*lint -esym(714, okl4_vservices_transport_type_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_vservices_transport_type_is_element_of(okl4_vservices_transport_type_t var);
+
+
+/*lint -esym(714, okl4_vservices_transport_type_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_vservices_transport_type_is_element_of(okl4_vservices_transport_type_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((var == OKL4_VSERVICES_TRANSPORT_TYPE_AXON) ||
+            (var == OKL4_VSERVICES_TRANSPORT_TYPE_SHARED_BUFFER));
+}
+
+
+/**
+
+*/
+
+struct okl4_vservices_transport_microvisor {
+    okl4_bool_t is_server;
+    _okl4_padding_t __padding0_1;
+    _okl4_padding_t __padding1_2;
+    _okl4_padding_t __padding2_3;
+    okl4_vservices_transport_type_t type;
+    union {
+        struct {
+            struct okl4_axon_ep_data ep;
+            okl4_ksize_t message_size;
+            okl4_count_t queue_length;
+            _okl4_padding_t __padding0_4; /**< Padding 8 */
+            _okl4_padding_t __padding1_5; /**< Padding 8 */
+            _okl4_padding_t __padding2_6; /**< Padding 8 */
+            _okl4_padding_t __padding3_7; /**< Padding 8 */
+        } axon;
+
+        struct {
+            okl4_ksize_t message_size;
+            okl4_count_t queue_length;
+            _okl4_padding_t __padding0_4; /**< Padding 8 */
+            _okl4_padding_t __padding1_5; /**< Padding 8 */
+            _okl4_padding_t __padding2_6; /**< Padding 8 */
+            _okl4_padding_t __padding3_7; /**< Padding 8 */
+            struct okl4_virtmem_item rx;
+            okl4_count_t rx_batch_size;
+            okl4_count_t rx_notify_bits;
+            struct okl4_virtmem_item tx;
+            okl4_count_t tx_batch_size;
+            okl4_count_t tx_notify_bits;
+        } shared_buffer;
+
+    } u;
+
+    struct okl4_virtual_interrupt_lines virqs_in;
+    struct okl4_virtual_interrupt_lines virqs_out;
+    okl4_count_t num_services;
+    _okl4_padding_t __padding3_4;
+    _okl4_padding_t __padding4_5;
+    _okl4_padding_t __padding5_6;
+    _okl4_padding_t __padding6_7;
+    __ptr64(struct okl4_vservices_service_descriptor *, services);
+};
+
+
+
+
+/**
+    The okl4_vservices_transports structure holds the number of VServices
+    transports and a pointer to the array describing them.
+*/
+
+struct okl4_vservices_transports {
+    okl4_count_t num_transports;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+    __ptr64(struct okl4_vservices_transport_microvisor *, transports);
+};
+
+
+
+
+
+typedef struct okl4_axon_data okl4_axon_data_t;
+typedef struct okl4_axon_ep_data okl4_axon_ep_data_t;
+typedef struct okl4_range_item okl4_range_item_t;
+typedef struct okl4_virtmem_item okl4_virtmem_item_t;
+typedef struct okl4_cell_management_item okl4_cell_management_item_t;
+typedef struct okl4_cell_management okl4_cell_management_t;
+typedef struct okl4_segment_mapping okl4_segment_mapping_t;
+typedef struct okl4_cell_management_segments okl4_cell_management_segments_t;
+typedef struct okl4_cell_management_vcpus okl4_cell_management_vcpus_t;
+typedef struct _okl4_env okl4_env_t;
+typedef struct okl4_env_access_cell okl4_env_access_cell_t;
+typedef struct okl4_env_access_entry okl4_env_access_entry_t;
+typedef struct okl4_env_access_table okl4_env_access_table_t;
+typedef struct okl4_env_args okl4_env_args_t;
+typedef struct okl4_env_interrupt_device_map okl4_env_interrupt_device_map_t;
+typedef struct okl4_interrupt okl4_interrupt_t;
+typedef struct okl4_env_interrupt_handle okl4_env_interrupt_handle_t;
+typedef struct okl4_env_interrupt_list okl4_env_interrupt_list_t;
+typedef struct okl4_env_profile_cell okl4_env_profile_cell_t;
+typedef struct okl4_env_profile_cpu okl4_env_profile_cpu_t;
+typedef struct okl4_env_profile_table okl4_env_profile_table_t;
+typedef struct okl4_env_segment okl4_env_segment_t;
+typedef struct okl4_env_segment_table okl4_env_segment_table_t;
+typedef struct okl4_firmware_segment okl4_firmware_segment_t;
+typedef struct okl4_firmware_segments_info okl4_firmware_segments_info_t;
+typedef void (*okl4_irq_callback_t)(okl4_interrupt_number_t irq, void *opaque);
+typedef struct okl4_kmmu okl4_kmmu_t;
+typedef struct okl4_ksp_user_agent okl4_ksp_user_agent_t;
+typedef struct okl4_pipe_data okl4_pipe_data_t;
+typedef struct okl4_pipe_ep_data okl4_pipe_ep_data_t;
+typedef struct okl4_link okl4_link_t;
+typedef struct okl4_links okl4_links_t;
+typedef struct okl4_machine_info okl4_machine_info_t;
+typedef struct okl4_merged_physpool okl4_merged_physpool_t;
+typedef struct okl4_microvisor_timer okl4_microvisor_timer_t;
+typedef struct okl4_schedule_profile_data okl4_schedule_profile_data_t;
+typedef struct okl4_shared_buffer okl4_shared_buffer_t;
+typedef struct okl4_shared_buffers_array okl4_shared_buffers_array_t;
+typedef struct okl4_tracebuffer_env okl4_tracebuffer_env_t;
+typedef struct okl4_vclient_info okl4_vclient_info_t;
+typedef struct okl4_vcpu_entry okl4_vcpu_entry_t;
+typedef struct okl4_vcpu_table okl4_vcpu_table_t;
+typedef struct okl4_virtmem_pool okl4_virtmem_pool_t;
+typedef struct okl4_virtual_interrupt_lines okl4_virtual_interrupt_lines_t;
+typedef struct okl4_vserver_info okl4_vserver_info_t;
+typedef struct okl4_vservices_service_descriptor okl4_vservices_service_descriptor_t;
+typedef struct okl4_vservices_transport_microvisor okl4_vservices_transport_microvisor_t;
+typedef struct okl4_vservices_transports okl4_vservices_transports_t;
+
+/*
+ * Return structures from system calls.
+ */
+/*lint -save -e958 -e959 implicit padding */
+struct _okl4_sys_axon_process_recv_return {
+    okl4_error_t error;
+    okl4_bool_t send_empty;
+};
+
+struct _okl4_sys_axon_set_halted_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_axon_set_recv_area_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_axon_set_recv_queue_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_axon_set_recv_segment_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_axon_set_send_area_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_axon_set_send_queue_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_axon_set_send_segment_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_axon_trigger_send_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_ack_return {
+    okl4_interrupt_number_t irq;
+    uint8_t source;
+};
+
+struct _okl4_sys_interrupt_attach_private_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_attach_shared_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_detach_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_dist_enable_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_eoi_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_get_highest_priority_pending_return {
+    okl4_interrupt_number_t irq;
+    uint8_t source;
+};
+
+struct _okl4_sys_interrupt_get_payload_return {
+    okl4_error_t error;
+    okl4_virq_flags_t payload;
+};
+
+struct _okl4_sys_interrupt_limits_return {
+    okl4_count_t cpunumber;
+    okl4_count_t itnumber;
+};
+
+struct _okl4_sys_interrupt_mask_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_raise_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_set_binary_point_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_set_config_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_set_control_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_set_priority_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_set_priority_mask_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_set_targets_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_unmask_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_kdb_set_object_name_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_ksp_procedure_call_return {
+    okl4_error_t error;
+    okl4_ksp_arg_t ret0;
+    okl4_ksp_arg_t ret1;
+    okl4_ksp_arg_t ret2;
+};
+
+struct _okl4_sys_mmu_attach_segment_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_mmu_detach_segment_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_mmu_flush_range_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_mmu_flush_range_pn_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_mmu_lookup_page_return {
+    okl4_error_t error;
+    okl4_psize_tr_t offset;
+    okl4_mmu_lookup_size_t size;
+    _okl4_page_attribute_t page_attr;
+};
+
+struct _okl4_sys_mmu_lookup_pn_return {
+    okl4_mmu_lookup_index_t segment_index;
+    okl4_psize_pn_t offset_pn;
+    okl4_lsize_pn_t count_pn;
+    _okl4_page_attribute_t page_attr;
+};
+
+struct _okl4_sys_mmu_map_page_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_mmu_map_pn_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_mmu_unmap_page_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_mmu_unmap_pn_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_mmu_update_page_attrs_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_mmu_update_page_perms_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_mmu_update_pn_attrs_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_mmu_update_pn_perms_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_performance_null_syscall_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_pipe_control_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_pipe_recv_return {
+    okl4_error_t error;
+    okl4_ksize_t size;
+};
+
+struct _okl4_sys_pipe_send_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_priority_waive_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_remote_get_register_return {
+    uint32_t reg_w0;
+    uint32_t reg_w1;
+    uint32_t reg_w2;
+    uint32_t reg_w3;
+    okl4_error_t error;
+};
+
+struct _okl4_sys_remote_get_registers_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_remote_read_memory32_return {
+    uint32_t data;
+    okl4_error_t error;
+};
+
+struct _okl4_sys_remote_set_register_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_remote_set_registers_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_remote_write_memory32_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_schedule_metrics_status_suspended_return {
+    okl4_error_t error;
+    uint32_t power_suspend_version;
+    uint32_t power_suspend_running_count;
+};
+
+struct _okl4_sys_schedule_metrics_watch_suspended_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_schedule_profile_cpu_disable_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_schedule_profile_cpu_enable_return {
+    okl4_error_t error;
+    uint64_t timestamp;
+};
+
+struct _okl4_sys_schedule_profile_cpu_get_data_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_schedule_profile_vcpu_disable_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_schedule_profile_vcpu_enable_return {
+    okl4_error_t error;
+    uint64_t timestamp;
+};
+
+struct _okl4_sys_schedule_profile_vcpu_get_data_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_scheduler_suspend_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_timer_cancel_return {
+    uint64_t remaining;
+    okl4_timer_flags_t old_flags;
+    okl4_error_t error;
+};
+
+struct _okl4_sys_timer_get_resolution_return {
+    uint64_t tick_freq;
+    uint32_t a;
+    uint32_t b;
+    okl4_error_t error;
+};
+
+struct _okl4_sys_timer_get_time_return {
+    uint64_t time;
+    okl4_error_t error;
+};
+
+struct _okl4_sys_timer_query_return {
+    uint64_t remaining;
+    okl4_timer_flags_t active_flags;
+    okl4_error_t error;
+};
+
+struct _okl4_sys_timer_start_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_vcpu_reset_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_vcpu_start_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_vcpu_stop_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_vcpu_switch_mode_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_vinterrupt_clear_and_raise_return {
+    okl4_error_t error;
+    okl4_virq_flags_t payload;
+};
+
+struct _okl4_sys_vinterrupt_modify_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_vinterrupt_raise_return {
+    okl4_error_t error;
+};
+
+/*lint -restore */
+
+/*
+ * Ensure type sizes have been correctly calculated by the
+ * code generator.  We test to see if the C compiler agrees
+ * with us about the size of the type.
+ */
+
+#if !defined(GLOBAL_STATIC_ASSERT)
+#if defined(__cplusplus)
+/* FIX: we should be able to use static_assert, but it doesn't compile */
+#define GLOBAL_STATIC_ASSERT(expr, msg)
+#else
+#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
+#define GLOBAL_STATIC_ASSERT(expr, msg) \
+        _Static_assert(expr, #msg);
+#else
+#define GLOBAL_STATIC_ASSERT(expr, msg)
+#endif
+#endif
+#endif
+
+
+GLOBAL_STATIC_ASSERT(sizeof(okl4_arm_mpidr_t) == 8U,
+        __autogen_confused_about_sizeof_arm_mpidr)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_arm_mpidr_t) == 8U,
+        __autogen_confused_about_alignof_arm_mpidr)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_arm_psci_function_t) == 4U,
+        __autogen_confused_about_sizeof_arm_psci_function)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_arm_psci_function_t) == 4U,
+        __autogen_confused_about_alignof_arm_psci_function)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_arm_psci_result_t) == 4U,
+        __autogen_confused_about_sizeof_arm_psci_result)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_arm_psci_result_t) == 4U,
+        __autogen_confused_about_alignof_arm_psci_result)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_arm_psci_suspend_state_t) == 4U,
+        __autogen_confused_about_sizeof_arm_psci_suspend_state)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_arm_psci_suspend_state_t) == 4U,
+        __autogen_confused_about_alignof_arm_psci_suspend_state)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_arm_sctlr_t) == 4U,
+        __autogen_confused_about_sizeof_arm_sctlr)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_arm_sctlr_t) == 4U,
+        __autogen_confused_about_alignof_arm_sctlr)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_arm_smccc_arch_function_t) == 4U,
+        __autogen_confused_about_sizeof_arm_smccc_arch_function)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_arm_smccc_arch_function_t) == 4U,
+        __autogen_confused_about_alignof_arm_smccc_arch_function)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_arm_smccc_result_t) == 4U,
+        __autogen_confused_about_sizeof_arm_smccc_result)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_arm_smccc_result_t) == 4U,
+        __autogen_confused_about_alignof_arm_smccc_result)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_atomic_register) == 8U,
+        __autogen_confused_about_sizeof_atomic_register)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_atomic_register) == 8U,
+        __autogen_confused_about_alignof_atomic_register)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_atomic_register_t) == 8U,
+        __autogen_confused_about_sizeof_atomic_register_t)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_atomic_register_t) == 8U,
+        __autogen_confused_about_alignof_atomic_register_t)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_atomic_uint16) == 2U,
+        __autogen_confused_about_sizeof_atomic_uint16)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_atomic_uint16) == 2U,
+        __autogen_confused_about_alignof_atomic_uint16)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_atomic_uint16_t) == 2U,
+        __autogen_confused_about_sizeof_atomic_uint16_t)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_atomic_uint16_t) == 2U,
+        __autogen_confused_about_alignof_atomic_uint16_t)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_atomic_uint32) == 4U,
+        __autogen_confused_about_sizeof_atomic_uint32)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_atomic_uint32) == 4U,
+        __autogen_confused_about_alignof_atomic_uint32)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_atomic_uint32_t) == 4U,
+        __autogen_confused_about_sizeof_atomic_uint32_t)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_atomic_uint32_t) == 4U,
+        __autogen_confused_about_alignof_atomic_uint32_t)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_atomic_uint64) == 8U,
+        __autogen_confused_about_sizeof_atomic_uint64)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_atomic_uint64) == 8U,
+        __autogen_confused_about_alignof_atomic_uint64)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_atomic_uint64_t) == 8U,
+        __autogen_confused_about_sizeof_atomic_uint64_t)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_atomic_uint64_t) == 8U,
+        __autogen_confused_about_alignof_atomic_uint64_t)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_atomic_uint8) == 1U,
+        __autogen_confused_about_sizeof_atomic_uint8)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_atomic_uint8) == 1U,
+        __autogen_confused_about_alignof_atomic_uint8)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_atomic_uint8_t) == 1U,
+        __autogen_confused_about_sizeof_atomic_uint8_t)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_atomic_uint8_t) == 1U,
+        __autogen_confused_about_alignof_atomic_uint8_t)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_axon_data) == 12U,
+        __autogen_confused_about_sizeof_axon_data)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_axon_data) == 4U,
+        __autogen_confused_about_alignof_axon_data)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_axon_data_info_t) == 8U,
+        __autogen_confused_about_sizeof_axon_data_info)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_axon_data_info_t) == 8U,
+        __autogen_confused_about_alignof_axon_data_info)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_axon_ep_data) == 24U,
+        __autogen_confused_about_sizeof_axon_ep_data)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_axon_ep_data) == 4U,
+        __autogen_confused_about_alignof_axon_ep_data)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_axon_queue) == 12U,
+        __autogen_confused_about_sizeof_axon_queue)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_axon_queue) == 4U,
+        __autogen_confused_about_alignof_axon_queue)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_axon_queue_entry) == 24U,
+        __autogen_confused_about_sizeof_axon_queue_entry)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_axon_queue_entry) == 8U,
+        __autogen_confused_about_alignof_axon_queue_entry)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_axon_queue_size_t) == 2U,
+        __autogen_confused_about_sizeof_axon_queue_size)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_axon_queue_size_t) == 2U,
+        __autogen_confused_about_alignof_axon_queue_size)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_axon_rx) == 56U,
+        __autogen_confused_about_sizeof_axon_rx)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_axon_rx) == 4U,
+        __autogen_confused_about_alignof_axon_rx)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_axon_tx) == 48U,
+        __autogen_confused_about_sizeof_axon_tx)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_axon_tx) == 4U,
+        __autogen_confused_about_alignof_axon_tx)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_axon_virq_flags_t) == 8U,
+        __autogen_confused_about_sizeof_axon_virq_flags)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_axon_virq_flags_t) == 8U,
+        __autogen_confused_about_alignof_axon_virq_flags)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_page_cache_t) == 4U,
+        __autogen_confused_about_sizeof_cache_attr)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_page_cache_t) == 4U,
+        __autogen_confused_about_alignof_cache_attr)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_range_item) == 16U,
+        __autogen_confused_about_sizeof_range_item)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_range_item) == 8U,
+        __autogen_confused_about_alignof_range_item)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_virtmem_item) == 16U,
+        __autogen_confused_about_sizeof_virtmem_item)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_virtmem_item) == 8U,
+        __autogen_confused_about_alignof_virtmem_item)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_cell_management_item) == 104U,
+        __autogen_confused_about_sizeof_cell_management_item)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_cell_management_item) == 8U,
+        __autogen_confused_about_alignof_cell_management_item)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_cell_management) == 8U,
+        __autogen_confused_about_sizeof_cell_management)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_cell_management) == 8U,
+        __autogen_confused_about_alignof_cell_management)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_segment_mapping) == 32U,
+        __autogen_confused_about_sizeof_segment_mapping)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_segment_mapping) == 8U,
+        __autogen_confused_about_alignof_segment_mapping)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_cell_management_segments) == 8U,
+        __autogen_confused_about_sizeof_cell_management_segments)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_cell_management_segments) == 8U,
+        __autogen_confused_about_alignof_cell_management_segments)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_cell_management_vcpus) == 4U,
+        __autogen_confused_about_sizeof_cell_management_vcpus)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_cell_management_vcpus) == 4U,
+        __autogen_confused_about_alignof_cell_management_vcpus)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_cpu_mode_t) == 4U,
+        __autogen_confused_about_sizeof_cpu_mode)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_cpu_mode_t) == 4U,
+        __autogen_confused_about_alignof_cpu_mode)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct _okl4_env_hdr) == 4U,
+        __autogen_confused_about_sizeof_env_hdr)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct _okl4_env_hdr) == 2U,
+        __autogen_confused_about_alignof_env_hdr)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct _okl4_env_item) == 16U,
+        __autogen_confused_about_sizeof_env_item)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct _okl4_env_item) == 8U,
+        __autogen_confused_about_alignof_env_item)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct _okl4_env) == 8U,
+        __autogen_confused_about_sizeof_env)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct _okl4_env) == 8U,
+        __autogen_confused_about_alignof_env)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_env_access_cell) == 16U,
+        __autogen_confused_about_sizeof_env_access_cell)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_env_access_cell) == 8U,
+        __autogen_confused_about_alignof_env_access_cell)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_page_perms_t) == 4U,
+        __autogen_confused_about_sizeof_page_perms)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_page_perms_t) == 4U,
+        __autogen_confused_about_alignof_page_perms)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_env_access_entry) == 48U,
+        __autogen_confused_about_sizeof_env_access_entry)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_env_access_entry) == 8U,
+        __autogen_confused_about_alignof_env_access_entry)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_env_access_table) == 24U,
+        __autogen_confused_about_sizeof_env_access_table)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_env_access_table) == 8U,
+        __autogen_confused_about_alignof_env_access_table)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_env_args) == 8U,
+        __autogen_confused_about_sizeof_env_args)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_env_args) == 8U,
+        __autogen_confused_about_alignof_env_args)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_env_interrupt_device_map) == 4U,
+        __autogen_confused_about_sizeof_env_interrupt_device_map)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_env_interrupt_device_map) == 4U,
+        __autogen_confused_about_alignof_env_interrupt_device_map)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_interrupt) == 4U,
+        __autogen_confused_about_sizeof_okl4_interrupt)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_interrupt) == 4U,
+        __autogen_confused_about_alignof_okl4_interrupt)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_env_interrupt_handle) == 8U,
+        __autogen_confused_about_sizeof_env_interrupt_handle)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_env_interrupt_handle) == 4U,
+        __autogen_confused_about_alignof_env_interrupt_handle)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_env_interrupt_list) == 24U,
+        __autogen_confused_about_sizeof_env_interrupt_list)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_env_interrupt_list) == 8U,
+        __autogen_confused_about_alignof_env_interrupt_list)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_env_profile_cell) == 48U,
+        __autogen_confused_about_sizeof_env_profile_cell)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_env_profile_cell) == 8U,
+        __autogen_confused_about_alignof_env_profile_cell)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_env_profile_cpu) == 4U,
+        __autogen_confused_about_sizeof_env_profile_cpu)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_env_profile_cpu) == 4U,
+        __autogen_confused_about_alignof_env_profile_cpu)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_env_profile_table) == 16U,
+        __autogen_confused_about_sizeof_env_profile_table)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_env_profile_table) == 8U,
+        __autogen_confused_about_alignof_env_profile_table)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_env_segment) == 24U,
+        __autogen_confused_about_sizeof_env_segment)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_env_segment) == 8U,
+        __autogen_confused_about_alignof_env_segment)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_env_segment_table) == 8U,
+        __autogen_confused_about_sizeof_env_segment_table)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_env_segment_table) == 8U,
+        __autogen_confused_about_alignof_env_segment_table)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_error_t) == 4U,
+        __autogen_confused_about_sizeof_error_t)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_error_t) == 4U,
+        __autogen_confused_about_alignof_error_t)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_firmware_segment) == 32U,
+        __autogen_confused_about_sizeof_firmware_segment)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_firmware_segment) == 8U,
+        __autogen_confused_about_alignof_firmware_segment)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_firmware_segments_info) == 8U,
+        __autogen_confused_about_sizeof_firmware_segments_info)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_firmware_segments_info) == 8U,
+        __autogen_confused_about_alignof_firmware_segments_info)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_gicd_icfgr_t) == 4U,
+        __autogen_confused_about_sizeof_gicd_icfgr)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_gicd_icfgr_t) == 4U,
+        __autogen_confused_about_alignof_gicd_icfgr)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_sgi_target_t) == 4U,
+        __autogen_confused_about_sizeof_sgi_target)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_sgi_target_t) == 4U,
+        __autogen_confused_about_alignof_sgi_target)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_gicd_sgir_t) == 4U,
+        __autogen_confused_about_sizeof_gicd_sgir)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_gicd_sgir_t) == 4U,
+        __autogen_confused_about_alignof_gicd_sgir)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_kmmu) == 4U,
+        __autogen_confused_about_sizeof_kmmu)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_kmmu) == 4U,
+        __autogen_confused_about_alignof_kmmu)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_ksp_user_agent) == 8U,
+        __autogen_confused_about_sizeof_ksp_user_agent)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_ksp_user_agent) == 4U,
+        __autogen_confused_about_alignof_ksp_user_agent)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_pipe_data) == 8U,
+        __autogen_confused_about_sizeof_pipe_data)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_pipe_data) == 4U,
+        __autogen_confused_about_alignof_pipe_data)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_pipe_ep_data) == 16U,
+        __autogen_confused_about_sizeof_pipe_ep_data)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_pipe_ep_data) == 4U,
+        __autogen_confused_about_alignof_pipe_ep_data)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_link_role_t) == 4U,
+        __autogen_confused_about_sizeof_link_role)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_link_role_t) == 4U,
+        __autogen_confused_about_alignof_link_role)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_link_transport_type_t) == 4U,
+        __autogen_confused_about_sizeof_link_transport_type)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_link_transport_type_t) == 4U,
+        __autogen_confused_about_alignof_link_transport_type)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_link) == 80U,
+        __autogen_confused_about_sizeof_link)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_link) == 8U,
+        __autogen_confused_about_alignof_link)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_links) == 8U,
+        __autogen_confused_about_sizeof_links)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_links) == 8U,
+        __autogen_confused_about_alignof_links)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_machine_info) == 24U,
+        __autogen_confused_about_sizeof_machine_info)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_machine_info) == 8U,
+        __autogen_confused_about_alignof_machine_info)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_merged_physpool) == 16U,
+        __autogen_confused_about_sizeof_merged_physpool)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_merged_physpool) == 8U,
+        __autogen_confused_about_alignof_merged_physpool)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_microvisor_timer) == 8U,
+        __autogen_confused_about_sizeof_microvisor_timer)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_microvisor_timer) == 4U,
+        __autogen_confused_about_alignof_microvisor_timer)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_mmu_lookup_index_t) == 4U,
+        __autogen_confused_about_sizeof_mmu_lookup_index)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_mmu_lookup_index_t) == 4U,
+        __autogen_confused_about_alignof_mmu_lookup_index)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_mmu_lookup_size_t) == 8U,
+        __autogen_confused_about_sizeof_mmu_lookup_size)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_mmu_lookup_size_t) == 8U,
+        __autogen_confused_about_alignof_mmu_lookup_size)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(_okl4_page_attribute_t) == 4U,
+        __autogen_confused_about_sizeof_page_attribute)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(_okl4_page_attribute_t) == 4U,
+        __autogen_confused_about_alignof_page_attribute)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_pipe_control_t) == 1U,
+        __autogen_confused_about_sizeof_pipe_control)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_pipe_control_t) == 1U,
+        __autogen_confused_about_alignof_pipe_control)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_pipe_state_t) == 1U,
+        __autogen_confused_about_sizeof_pipe_state)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_pipe_state_t) == 1U,
+        __autogen_confused_about_alignof_pipe_state)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_register_set_t) == 4U,
+        __autogen_confused_about_sizeof_register_set)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_register_set_t) == 4U,
+        __autogen_confused_about_alignof_register_set)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_register_and_set_t) == 4U,
+        __autogen_confused_about_sizeof_register_and_set)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_register_and_set_t) == 4U,
+        __autogen_confused_about_alignof_register_and_set)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_cpu_registers) == 448U,
+        __autogen_confused_about_sizeof_registers)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_cpu_registers) == 8U,
+        __autogen_confused_about_alignof_registers)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_cpu_registers_t) == 448U,
+        __autogen_confused_about_sizeof_registers_t)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_cpu_registers_t) == 8U,
+        __autogen_confused_about_alignof_registers_t)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_schedule_profile_data) == 32U,
+        __autogen_confused_about_sizeof_schedule_profile_data)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_schedule_profile_data) == 8U,
+        __autogen_confused_about_alignof_schedule_profile_data)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_scheduler_virq_flags_t) == 8U,
+        __autogen_confused_about_sizeof_scheduler_virq_flags)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_scheduler_virq_flags_t) == 8U,
+        __autogen_confused_about_alignof_scheduler_virq_flags)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_sdk_version_t) == 4U,
+        __autogen_confused_about_sizeof_sdk_version)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_sdk_version_t) == 4U,
+        __autogen_confused_about_alignof_sdk_version)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_shared_buffer) == 32U,
+        __autogen_confused_about_sizeof_shared_buffer)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_shared_buffer) == 8U,
+        __autogen_confused_about_alignof_shared_buffer)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_shared_buffers_array) == 16U,
+        __autogen_confused_about_sizeof_shared_buffers_array)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_shared_buffers_array) == 8U,
+        __autogen_confused_about_alignof_shared_buffers_array)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_timer_flags_t) == 4U,
+        __autogen_confused_about_sizeof_timer_flags)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_timer_flags_t) == 4U,
+        __autogen_confused_about_alignof_timer_flags)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct _okl4_tracebuffer_buffer_header) == 40U,
+        __autogen_confused_about_sizeof_tracebuffer_buffer_header)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct _okl4_tracebuffer_buffer_header) == 8U,
+        __autogen_confused_about_alignof_tracebuffer_buffer_header)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_tracebuffer_env) == 24U,
+        __autogen_confused_about_sizeof_tracebuffer_env)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_tracebuffer_env) == 8U,
+        __autogen_confused_about_alignof_tracebuffer_env)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct _okl4_tracebuffer_header) == 40U,
+        __autogen_confused_about_sizeof_tracebuffer_header)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct _okl4_tracebuffer_header) == 8U,
+        __autogen_confused_about_alignof_tracebuffer_header)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_tracepoint_class_t) == 4U,
+        __autogen_confused_about_sizeof_tracepoint_class)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_tracepoint_class_t) == 4U,
+        __autogen_confused_about_alignof_tracepoint_class)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(_okl4_tracepoint_desc_t) == 4U,
+        __autogen_confused_about_sizeof_tracepoint_desc)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(_okl4_tracepoint_desc_t) == 4U,
+        __autogen_confused_about_alignof_tracepoint_desc)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(_okl4_tracepoint_masks_t) == 4U,
+        __autogen_confused_about_sizeof_tracepoint_masks)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(_okl4_tracepoint_masks_t) == 4U,
+        __autogen_confused_about_alignof_tracepoint_masks)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_tracepoint_entry_base) == 12U,
+        __autogen_confused_about_sizeof_tracepoint_entry_base)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_tracepoint_entry_base) == 4U,
+        __autogen_confused_about_alignof_tracepoint_entry_base)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_tracepoint_evt_t) == 4U,
+        __autogen_confused_about_sizeof_tracepoint_evt)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_tracepoint_evt_t) == 4U,
+        __autogen_confused_about_alignof_tracepoint_evt)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_tracepoint_level_t) == 4U,
+        __autogen_confused_about_sizeof_tracepoint_level)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_tracepoint_level_t) == 4U,
+        __autogen_confused_about_alignof_tracepoint_level)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_tracepoint_subsystem_t) == 4U,
+        __autogen_confused_about_sizeof_tracepoint_subsystem)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_tracepoint_subsystem_t) == 4U,
+        __autogen_confused_about_alignof_tracepoint_subsystem)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_tracepoint_unpacked_entry) == 12U,
+        __autogen_confused_about_sizeof_tracepoint_unpacked_entry)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_tracepoint_unpacked_entry) == 4U,
+        __autogen_confused_about_alignof_tracepoint_unpacked_entry)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_vclient_info) == 32U,
+        __autogen_confused_about_sizeof_vclient_info)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_vclient_info) == 8U,
+        __autogen_confused_about_alignof_vclient_info)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_vcpu_entry) == 24U,
+        __autogen_confused_about_sizeof_vcpu_entry)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_vcpu_entry) == 8U,
+        __autogen_confused_about_alignof_vcpu_entry)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_vcpu_table) == 16U,
+        __autogen_confused_about_sizeof_vcpu_table)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_vcpu_table) == 8U,
+        __autogen_confused_about_alignof_vcpu_table)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_vfp_ctrl_registers) == 8U,
+        __autogen_confused_about_sizeof_vfp_ctrl_registers)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_vfp_ctrl_registers) == 4U,
+        __autogen_confused_about_alignof_vfp_ctrl_registers)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_vfp_ctrl_registers_t) == 8U,
+        __autogen_confused_about_sizeof_vfp_ctrl_registers_t)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_vfp_ctrl_registers_t) == 4U,
+        __autogen_confused_about_alignof_vfp_ctrl_registers_t)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_vfp_ops_t) == 4U,
+        __autogen_confused_about_sizeof_vfp_ops)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_vfp_ops_t) == 4U,
+        __autogen_confused_about_alignof_vfp_ops)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_vfp_register) == 16U,
+        __autogen_confused_about_sizeof_vfp_register)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_vfp_register) == 16U,
+        __autogen_confused_about_alignof_vfp_register)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_vfp_register_t) == 16U,
+        __autogen_confused_about_sizeof_vfp_register_t)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_vfp_register_t) == 16U,
+        __autogen_confused_about_alignof_vfp_register_t)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_vfp_registers) == 528U,
+        __autogen_confused_about_sizeof_vfp_registers)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_vfp_registers) == 16U,
+        __autogen_confused_about_alignof_vfp_registers)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_vfp_registers_t) == 528U,
+        __autogen_confused_about_sizeof_vfp_registers_t)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_vfp_registers_t) == 16U,
+        __autogen_confused_about_alignof_vfp_registers_t)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_virtmem_pool) == 16U,
+        __autogen_confused_about_sizeof_virtmem_pool)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_virtmem_pool) == 8U,
+        __autogen_confused_about_alignof_virtmem_pool)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_virtual_interrupt_lines) == 16U,
+        __autogen_confused_about_sizeof_virtual_interrupt_lines)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_virtual_interrupt_lines) == 8U,
+        __autogen_confused_about_alignof_virtual_interrupt_lines)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_vserver_info) == 32U,
+        __autogen_confused_about_sizeof_vserver_info)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_vserver_info) == 8U,
+        __autogen_confused_about_alignof_vserver_info)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_vservices_service_descriptor) == 24U,
+        __autogen_confused_about_sizeof_vservices_service_descriptor)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_vservices_service_descriptor) == 8U,
+        __autogen_confused_about_alignof_vservices_service_descriptor)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_vservices_transport_type_t) == 4U,
+        __autogen_confused_about_sizeof_vservices_transport_type)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_vservices_transport_type_t) == 4U,
+        __autogen_confused_about_alignof_vservices_transport_type)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_vservices_transport_microvisor) == 120U,
+        __autogen_confused_about_sizeof_vservices_transport_microvisor)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_vservices_transport_microvisor) == 8U,
+        __autogen_confused_about_alignof_vservices_transport_microvisor)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_vservices_transports) == 16U,
+        __autogen_confused_about_sizeof_vservices_transports)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_vservices_transports) == 8U,
+        __autogen_confused_about_alignof_vservices_transports)
+#endif
+
+#else
+
+/**
+ *  okl4_arm_mpidr_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_AFF0_ARM_MPIDR) */
+#define OKL4_ASM_MASK_AFF0_ARM_MPIDR (255)
+/*lint -esym(621, OKL4_ASM_SHIFT_AFF0_ARM_MPIDR) */
+#define OKL4_ASM_SHIFT_AFF0_ARM_MPIDR (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_AFF0_ARM_MPIDR) */
+#define OKL4_ASM_WIDTH_AFF0_ARM_MPIDR (8)
+/*lint -esym(621, OKL4_ASM_MASK_AFF1_ARM_MPIDR) */
+#define OKL4_ASM_MASK_AFF1_ARM_MPIDR (255 << 8)
+/*lint -esym(621, OKL4_ASM_SHIFT_AFF1_ARM_MPIDR) */
+#define OKL4_ASM_SHIFT_AFF1_ARM_MPIDR (8)
+/*lint -esym(621, OKL4_ASM_WIDTH_AFF1_ARM_MPIDR) */
+#define OKL4_ASM_WIDTH_AFF1_ARM_MPIDR (8)
+/*lint -esym(621, OKL4_ASM_MASK_AFF2_ARM_MPIDR) */
+#define OKL4_ASM_MASK_AFF2_ARM_MPIDR (255 << 16)
+/*lint -esym(621, OKL4_ASM_SHIFT_AFF2_ARM_MPIDR) */
+#define OKL4_ASM_SHIFT_AFF2_ARM_MPIDR (16)
+/*lint -esym(621, OKL4_ASM_WIDTH_AFF2_ARM_MPIDR) */
+#define OKL4_ASM_WIDTH_AFF2_ARM_MPIDR (8)
+/*lint -esym(621, OKL4_ASM_MASK_MT_ARM_MPIDR) */
+#define OKL4_ASM_MASK_MT_ARM_MPIDR (1 << 24)
+/*lint -esym(621, OKL4_ASM_SHIFT_MT_ARM_MPIDR) */
+#define OKL4_ASM_SHIFT_MT_ARM_MPIDR (24)
+/*lint -esym(621, OKL4_ASM_WIDTH_MT_ARM_MPIDR) */
+#define OKL4_ASM_WIDTH_MT_ARM_MPIDR (1)
+/*lint -esym(621, OKL4_ASM_MASK_U_ARM_MPIDR) */
+#define OKL4_ASM_MASK_U_ARM_MPIDR (1 << 30)
+/*lint -esym(621, OKL4_ASM_SHIFT_U_ARM_MPIDR) */
+#define OKL4_ASM_SHIFT_U_ARM_MPIDR (30)
+/*lint -esym(621, OKL4_ASM_WIDTH_U_ARM_MPIDR) */
+#define OKL4_ASM_WIDTH_U_ARM_MPIDR (1)
+/*lint -esym(621, OKL4_ASM_MASK_MP_ARM_MPIDR) */
+#define OKL4_ASM_MASK_MP_ARM_MPIDR (1 << 31)
+/*lint -esym(621, OKL4_ASM_SHIFT_MP_ARM_MPIDR) */
+#define OKL4_ASM_SHIFT_MP_ARM_MPIDR (31)
+/*lint -esym(621, OKL4_ASM_WIDTH_MP_ARM_MPIDR) */
+#define OKL4_ASM_WIDTH_MP_ARM_MPIDR (1)
+/*lint -esym(621, OKL4_ASM_MASK_AFF3_ARM_MPIDR) */
+#define OKL4_ASM_MASK_AFF3_ARM_MPIDR (255 << 32)
+/*lint -esym(621, OKL4_ASM_SHIFT_AFF3_ARM_MPIDR) */
+#define OKL4_ASM_SHIFT_AFF3_ARM_MPIDR (32)
+/*lint -esym(621, OKL4_ASM_WIDTH_AFF3_ARM_MPIDR) */
+#define OKL4_ASM_WIDTH_AFF3_ARM_MPIDR (8)
+
+
+/**
+ *  uint32_t
+ **/
+/*lint -esym(621, OKL4_AXON_NUM_RECEIVE_QUEUES) */
+#define OKL4_AXON_NUM_RECEIVE_QUEUES (4)
+
+/*lint -esym(621, OKL4_AXON_NUM_SEND_QUEUES) */
+#define OKL4_AXON_NUM_SEND_QUEUES (4)
+
+/*lint -esym(621, _OKL4_POISON) */
+#define _OKL4_POISON (3735928559)
+
+/*lint -esym(621, OKL4_TRACEBUFFER_INVALID_REF) */
+#define OKL4_TRACEBUFFER_INVALID_REF (-1)
+
+/**
+ *  okl4_arm_psci_function_t
+ **/
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_PSCI_VERSION) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_PSCI_VERSION (0x0)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_CPU_SUSPEND) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_CPU_SUSPEND (0x1)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_CPU_OFF) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_CPU_OFF (0x2)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_CPU_ON) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_CPU_ON (0x3)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_AFFINITY_INFO) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_AFFINITY_INFO (0x4)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_MIGRATE) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_MIGRATE (0x5)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_MIGRATE_INFO_TYPE) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_MIGRATE_INFO_TYPE (0x6)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_MIGRATE_INFO_UP_CPU) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_MIGRATE_INFO_UP_CPU (0x7)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_SYSTEM_OFF) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_SYSTEM_OFF (0x8)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_SYSTEM_RESET) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_SYSTEM_RESET (0x9)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_PSCI_FEATURES) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_PSCI_FEATURES (0xa)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_CPU_FREEZE) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_CPU_FREEZE (0xb)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_CPU_DEFAULT_SUSPEND) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_CPU_DEFAULT_SUSPEND (0xc)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_NODE_HW_STATE) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_NODE_HW_STATE (0xd)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_SYSTEM_SUSPEND) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_SYSTEM_SUSPEND (0xe)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_PSCI_SET_SUSPEND_MODE) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_PSCI_SET_SUSPEND_MODE (0xf)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_PSCI_STAT_RESIDENCY) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_PSCI_STAT_RESIDENCY (0x10)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_PSCI_STAT_COUNT) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_PSCI_STAT_COUNT (0x11)
+
+/**
+ *  okl4_arm_psci_result_t
+ **/
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_RESULT_SUCCESS) */
+#define OKL4_ASM_ARM_PSCI_RESULT_SUCCESS (0x0)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_RESULT_INVALID_ADDRESS) */
+#define OKL4_ASM_ARM_PSCI_RESULT_INVALID_ADDRESS (0xfffffff7)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_RESULT_DISABLED) */
+#define OKL4_ASM_ARM_PSCI_RESULT_DISABLED (0xfffffff8)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_RESULT_NOT_PRESENT) */
+#define OKL4_ASM_ARM_PSCI_RESULT_NOT_PRESENT (0xfffffff9)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_RESULT_INTERNAL_FAILURE) */
+#define OKL4_ASM_ARM_PSCI_RESULT_INTERNAL_FAILURE (0xfffffffa)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_RESULT_ON_PENDING) */
+#define OKL4_ASM_ARM_PSCI_RESULT_ON_PENDING (0xfffffffb)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_RESULT_ALREADY_ON) */
+#define OKL4_ASM_ARM_PSCI_RESULT_ALREADY_ON (0xfffffffc)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_RESULT_DENIED) */
+#define OKL4_ASM_ARM_PSCI_RESULT_DENIED (0xfffffffd)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_RESULT_INVALID_PARAMETERS) */
+#define OKL4_ASM_ARM_PSCI_RESULT_INVALID_PARAMETERS (0xfffffffe)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_RESULT_NOT_SUPPORTED) */
+#define OKL4_ASM_ARM_PSCI_RESULT_NOT_SUPPORTED (0xffffffff)
+
+/**
+ *  okl4_arm_psci_suspend_state_t
+ **/
+
+/*lint -esym(621, OKL4_ARM_PSCI_POWER_LEVEL_CPU) */
+#define OKL4_ARM_PSCI_POWER_LEVEL_CPU (0)
+
+/*lint -esym(621, OKL4_ASM_MASK_STATE_ID_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_ASM_MASK_STATE_ID_ARM_PSCI_SUSPEND_STATE (65535)
+/*lint -esym(621, OKL4_ASM_SHIFT_STATE_ID_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_ASM_SHIFT_STATE_ID_ARM_PSCI_SUSPEND_STATE (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_STATE_ID_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_ASM_WIDTH_STATE_ID_ARM_PSCI_SUSPEND_STATE (16)
+/*lint -esym(621, OKL4_ASM_MASK_POWER_DOWN_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_ASM_MASK_POWER_DOWN_ARM_PSCI_SUSPEND_STATE (1 << 16)
+/*lint -esym(621, OKL4_ASM_SHIFT_POWER_DOWN_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_ASM_SHIFT_POWER_DOWN_ARM_PSCI_SUSPEND_STATE (16)
+/*lint -esym(621, OKL4_ASM_WIDTH_POWER_DOWN_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_ASM_WIDTH_POWER_DOWN_ARM_PSCI_SUSPEND_STATE (1)
+/*lint -esym(621, OKL4_ASM_MASK_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_ASM_MASK_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE (3 << 24)
+/*lint -esym(621, OKL4_ASM_SHIFT_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_ASM_SHIFT_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE (24)
+/*lint -esym(621, OKL4_ASM_WIDTH_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_ASM_WIDTH_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE (2)
+
+
+/**
+ *  okl4_arm_sctlr_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_MMU_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_MASK_MMU_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_SHIFT_MMU_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_MMU_ENABLE_ARM_SCTLR (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_MMU_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_MMU_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_MASK_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR (1 << 1)
+/*lint -esym(621, OKL4_ASM_SHIFT_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_WIDTH_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_DATA_CACHE_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_MASK_DATA_CACHE_ENABLE_ARM_SCTLR (1 << 2)
+/*lint -esym(621, OKL4_ASM_SHIFT_DATA_CACHE_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_DATA_CACHE_ENABLE_ARM_SCTLR (2)
+/*lint -esym(621, OKL4_ASM_WIDTH_DATA_CACHE_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_DATA_CACHE_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_STACK_ALIGN_ARM_SCTLR) */
+#define OKL4_ASM_MASK_STACK_ALIGN_ARM_SCTLR (1 << 3)
+/*lint -esym(621, OKL4_ASM_SHIFT_STACK_ALIGN_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_STACK_ALIGN_ARM_SCTLR (3)
+/*lint -esym(621, OKL4_ASM_WIDTH_STACK_ALIGN_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_STACK_ALIGN_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_STACK_ALIGN_EL0_ARM_SCTLR) */
+#define OKL4_ASM_MASK_STACK_ALIGN_EL0_ARM_SCTLR (1 << 4)
+/*lint -esym(621, OKL4_ASM_SHIFT_STACK_ALIGN_EL0_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_STACK_ALIGN_EL0_ARM_SCTLR (4)
+/*lint -esym(621, OKL4_ASM_WIDTH_STACK_ALIGN_EL0_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_STACK_ALIGN_EL0_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_CP15_BARRIER_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_MASK_CP15_BARRIER_ENABLE_ARM_SCTLR (1 << 5)
+/*lint -esym(621, OKL4_ASM_SHIFT_CP15_BARRIER_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_CP15_BARRIER_ENABLE_ARM_SCTLR (5)
+/*lint -esym(621, OKL4_ASM_WIDTH_CP15_BARRIER_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_CP15_BARRIER_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_OKL_HCR_EL2_DC_ARM_SCTLR) */
+#define OKL4_ASM_MASK_OKL_HCR_EL2_DC_ARM_SCTLR (1 << 6)
+/*lint -esym(621, OKL4_ASM_SHIFT_OKL_HCR_EL2_DC_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_OKL_HCR_EL2_DC_ARM_SCTLR (6)
+/*lint -esym(621, OKL4_ASM_WIDTH_OKL_HCR_EL2_DC_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_OKL_HCR_EL2_DC_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_IT_DISABLE_ARM_SCTLR) */
+#define OKL4_ASM_MASK_IT_DISABLE_ARM_SCTLR (1 << 7)
+/*lint -esym(621, OKL4_ASM_SHIFT_IT_DISABLE_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_IT_DISABLE_ARM_SCTLR (7)
+/*lint -esym(621, OKL4_ASM_WIDTH_IT_DISABLE_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_IT_DISABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_SETEND_DISABLE_ARM_SCTLR) */
+#define OKL4_ASM_MASK_SETEND_DISABLE_ARM_SCTLR (1 << 8)
+/*lint -esym(621, OKL4_ASM_SHIFT_SETEND_DISABLE_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_SETEND_DISABLE_ARM_SCTLR (8)
+/*lint -esym(621, OKL4_ASM_WIDTH_SETEND_DISABLE_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_SETEND_DISABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_USER_MASK_ACCESS_ARM_SCTLR) */
+#define OKL4_ASM_MASK_USER_MASK_ACCESS_ARM_SCTLR (1 << 9)
+/*lint -esym(621, OKL4_ASM_SHIFT_USER_MASK_ACCESS_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_USER_MASK_ACCESS_ARM_SCTLR (9)
+/*lint -esym(621, OKL4_ASM_WIDTH_USER_MASK_ACCESS_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_USER_MASK_ACCESS_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_RESERVED11_ARM_SCTLR) */
+#define OKL4_ASM_MASK_RESERVED11_ARM_SCTLR (1 << 11)
+/*lint -esym(621, OKL4_ASM_SHIFT_RESERVED11_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_RESERVED11_ARM_SCTLR (11)
+/*lint -esym(621, OKL4_ASM_WIDTH_RESERVED11_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_RESERVED11_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_MASK_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR (1 << 12)
+/*lint -esym(621, OKL4_ASM_SHIFT_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR (12)
+/*lint -esym(621, OKL4_ASM_WIDTH_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_VECTORS_BIT_ARM_SCTLR) */
+#define OKL4_ASM_MASK_VECTORS_BIT_ARM_SCTLR (1 << 13)
+/*lint -esym(621, OKL4_ASM_SHIFT_VECTORS_BIT_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_VECTORS_BIT_ARM_SCTLR (13)
+/*lint -esym(621, OKL4_ASM_WIDTH_VECTORS_BIT_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_VECTORS_BIT_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_DCACHE_ZERO_ARM_SCTLR) */
+#define OKL4_ASM_MASK_DCACHE_ZERO_ARM_SCTLR (1 << 14)
+/*lint -esym(621, OKL4_ASM_SHIFT_DCACHE_ZERO_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_DCACHE_ZERO_ARM_SCTLR (14)
+/*lint -esym(621, OKL4_ASM_WIDTH_DCACHE_ZERO_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_DCACHE_ZERO_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_USER_CACHE_TYPE_ARM_SCTLR) */
+#define OKL4_ASM_MASK_USER_CACHE_TYPE_ARM_SCTLR (1 << 15)
+/*lint -esym(621, OKL4_ASM_SHIFT_USER_CACHE_TYPE_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_USER_CACHE_TYPE_ARM_SCTLR (15)
+/*lint -esym(621, OKL4_ASM_WIDTH_USER_CACHE_TYPE_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_USER_CACHE_TYPE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_NO_TRAP_WFI_ARM_SCTLR) */
+#define OKL4_ASM_MASK_NO_TRAP_WFI_ARM_SCTLR (1 << 16)
+/*lint -esym(621, OKL4_ASM_SHIFT_NO_TRAP_WFI_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_NO_TRAP_WFI_ARM_SCTLR (16)
+/*lint -esym(621, OKL4_ASM_WIDTH_NO_TRAP_WFI_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_NO_TRAP_WFI_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_NO_TRAP_WFE_ARM_SCTLR) */
+#define OKL4_ASM_MASK_NO_TRAP_WFE_ARM_SCTLR (1 << 18)
+/*lint -esym(621, OKL4_ASM_SHIFT_NO_TRAP_WFE_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_NO_TRAP_WFE_ARM_SCTLR (18)
+/*lint -esym(621, OKL4_ASM_WIDTH_NO_TRAP_WFE_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_NO_TRAP_WFE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_WRITE_EXEC_NEVER_ARM_SCTLR) */
+#define OKL4_ASM_MASK_WRITE_EXEC_NEVER_ARM_SCTLR (1 << 19)
+/*lint -esym(621, OKL4_ASM_SHIFT_WRITE_EXEC_NEVER_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_WRITE_EXEC_NEVER_ARM_SCTLR (19)
+/*lint -esym(621, OKL4_ASM_WIDTH_WRITE_EXEC_NEVER_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_WRITE_EXEC_NEVER_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_USER_WRITE_EXEC_NEVER_ARM_SCTLR) */
+#define OKL4_ASM_MASK_USER_WRITE_EXEC_NEVER_ARM_SCTLR (1 << 20)
+/*lint -esym(621, OKL4_ASM_SHIFT_USER_WRITE_EXEC_NEVER_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_USER_WRITE_EXEC_NEVER_ARM_SCTLR (20)
+/*lint -esym(621, OKL4_ASM_WIDTH_USER_WRITE_EXEC_NEVER_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_USER_WRITE_EXEC_NEVER_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_RESERVED22_ARM_SCTLR) */
+#define OKL4_ASM_MASK_RESERVED22_ARM_SCTLR (1 << 22)
+/*lint -esym(621, OKL4_ASM_SHIFT_RESERVED22_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_RESERVED22_ARM_SCTLR (22)
+/*lint -esym(621, OKL4_ASM_WIDTH_RESERVED22_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_RESERVED22_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_RESERVED23_ARM_SCTLR) */
+#define OKL4_ASM_MASK_RESERVED23_ARM_SCTLR (1 << 23)
+/*lint -esym(621, OKL4_ASM_SHIFT_RESERVED23_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_RESERVED23_ARM_SCTLR (23)
+/*lint -esym(621, OKL4_ASM_WIDTH_RESERVED23_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_RESERVED23_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_EL0_ENDIANNESS_ARM_SCTLR) */
+#define OKL4_ASM_MASK_EL0_ENDIANNESS_ARM_SCTLR (1 << 24)
+/*lint -esym(621, OKL4_ASM_SHIFT_EL0_ENDIANNESS_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_EL0_ENDIANNESS_ARM_SCTLR (24)
+/*lint -esym(621, OKL4_ASM_WIDTH_EL0_ENDIANNESS_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_EL0_ENDIANNESS_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_EXCEPTION_ENDIANNESS_ARM_SCTLR) */
+#define OKL4_ASM_MASK_EXCEPTION_ENDIANNESS_ARM_SCTLR (1 << 25)
+/*lint -esym(621, OKL4_ASM_SHIFT_EXCEPTION_ENDIANNESS_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_EXCEPTION_ENDIANNESS_ARM_SCTLR (25)
+/*lint -esym(621, OKL4_ASM_WIDTH_EXCEPTION_ENDIANNESS_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_EXCEPTION_ENDIANNESS_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_TEX_REMAP_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_MASK_TEX_REMAP_ENABLE_ARM_SCTLR (1 << 28)
+/*lint -esym(621, OKL4_ASM_SHIFT_TEX_REMAP_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_TEX_REMAP_ENABLE_ARM_SCTLR (28)
+/*lint -esym(621, OKL4_ASM_WIDTH_TEX_REMAP_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_TEX_REMAP_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_ACCESS_FLAG_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_MASK_ACCESS_FLAG_ENABLE_ARM_SCTLR (1 << 29)
+/*lint -esym(621, OKL4_ASM_SHIFT_ACCESS_FLAG_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_ACCESS_FLAG_ENABLE_ARM_SCTLR (29)
+/*lint -esym(621, OKL4_ASM_WIDTH_ACCESS_FLAG_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_ACCESS_FLAG_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_THUMB_EXCEPTION_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_MASK_THUMB_EXCEPTION_ENABLE_ARM_SCTLR (1 << 30)
+/*lint -esym(621, OKL4_ASM_SHIFT_THUMB_EXCEPTION_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_THUMB_EXCEPTION_ENABLE_ARM_SCTLR (30)
+/*lint -esym(621, OKL4_ASM_WIDTH_THUMB_EXCEPTION_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_THUMB_EXCEPTION_ENABLE_ARM_SCTLR (1)
+
+
+/**
+ *  okl4_arm_smccc_arch_function_t
+ **/
+/*lint -esym(621, OKL4_ASM_ARM_SMCCC_ARCH_FUNCTION_SMCCC_VERSION) */
+#define OKL4_ASM_ARM_SMCCC_ARCH_FUNCTION_SMCCC_VERSION (0x0)
+/*lint -esym(621, OKL4_ASM_ARM_SMCCC_ARCH_FUNCTION_ARCH_FEATURES) */
+#define OKL4_ASM_ARM_SMCCC_ARCH_FUNCTION_ARCH_FEATURES (0x1)
+/*lint -esym(621, OKL4_ASM_ARM_SMCCC_ARCH_FUNCTION_ARCH_WORKAROUND_1) */
+#define OKL4_ASM_ARM_SMCCC_ARCH_FUNCTION_ARCH_WORKAROUND_1 (0x8000)
+
+/**
+ *  okl4_arm_smccc_result_t
+ **/
+/*lint -esym(621, OKL4_ASM_ARM_SMCCC_RESULT_SUCCESS) */
+#define OKL4_ASM_ARM_SMCCC_RESULT_SUCCESS (0x0)
+/*lint -esym(621, OKL4_ASM_ARM_SMCCC_RESULT_NOT_SUPPORTED) */
+#define OKL4_ASM_ARM_SMCCC_RESULT_NOT_SUPPORTED (0xffffffff)
+
+/**
+ *  okl4_count_t
+ **/
+/*lint -esym(621, OKL4_DEFAULT_PAGEBITS) */
+#define OKL4_DEFAULT_PAGEBITS (12)
+
+/** The maximum limit for segment index returned in mmu_lookup_segment. */
+/*lint -esym(621, OKL4_KMMU_LOOKUP_PAGE_SEGMENT_MASK) */
+#define OKL4_KMMU_LOOKUP_PAGE_SEGMENT_MASK (1023)
+
+/** The maximum limit for segment attachments to a KMMU. */
+/*lint -esym(621, OKL4_KMMU_MAX_SEGMENTS) */
+#define OKL4_KMMU_MAX_SEGMENTS (256)
+
+/*lint -esym(621, OKL4_PROFILE_NO_PCPUS) */
+#define OKL4_PROFILE_NO_PCPUS (-1)
+
+/**
+ *  okl4_kcap_t
+ **/
+/*lint -esym(621, OKL4_KCAP_INVALID) */
+#define OKL4_KCAP_INVALID (-1)
+
+/**
+ *  okl4_interrupt_number_t
+ **/
+/*lint -esym(621, OKL4_INTERRUPT_INVALID_IRQ) */
+#define OKL4_INTERRUPT_INVALID_IRQ (1023)
+
+/*lint -esym(621, OKL4_INVALID_VIRQ) */
+#define OKL4_INVALID_VIRQ (1023)
+
+/**
+ *  okl4_lsize_t
+ **/
+/*lint -esym(621, OKL4_DEFAULT_PAGESIZE) */
+#define OKL4_DEFAULT_PAGESIZE (4096)
+
+/**
+ *  okl4_laddr_t
+ **/
+/*lint -esym(621, OKL4_USER_AREA_END) */
+#define OKL4_USER_AREA_END (17592186044416)
+
+/**
+ *  okl4_axon_data_info_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_PENDING_AXON_DATA_INFO) */
+#define OKL4_ASM_MASK_PENDING_AXON_DATA_INFO (1)
+/*lint -esym(621, OKL4_ASM_SHIFT_PENDING_AXON_DATA_INFO) */
+#define OKL4_ASM_SHIFT_PENDING_AXON_DATA_INFO (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_PENDING_AXON_DATA_INFO) */
+#define OKL4_ASM_WIDTH_PENDING_AXON_DATA_INFO (1)
+/*lint -esym(621, OKL4_ASM_MASK_FAILURE_AXON_DATA_INFO) */
+#define OKL4_ASM_MASK_FAILURE_AXON_DATA_INFO (1 << 1)
+/*lint -esym(621, OKL4_ASM_SHIFT_FAILURE_AXON_DATA_INFO) */
+#define OKL4_ASM_SHIFT_FAILURE_AXON_DATA_INFO (1)
+/*lint -esym(621, OKL4_ASM_WIDTH_FAILURE_AXON_DATA_INFO) */
+#define OKL4_ASM_WIDTH_FAILURE_AXON_DATA_INFO (1)
+/*lint -esym(621, OKL4_ASM_MASK_USR_AXON_DATA_INFO) */
+#define OKL4_ASM_MASK_USR_AXON_DATA_INFO (1 << 2)
+/*lint -esym(621, OKL4_ASM_SHIFT_USR_AXON_DATA_INFO) */
+#define OKL4_ASM_SHIFT_USR_AXON_DATA_INFO (2)
+/*lint -esym(621, OKL4_ASM_WIDTH_USR_AXON_DATA_INFO) */
+#define OKL4_ASM_WIDTH_USR_AXON_DATA_INFO (1)
+/*lint -esym(621, OKL4_ASM_MASK_LADDR_AXON_DATA_INFO) */
+#define OKL4_ASM_MASK_LADDR_AXON_DATA_INFO (2305843009213693951 << 3)
+/*lint -esym(621, OKL4_ASM_SHIFT_LADDR_AXON_DATA_INFO) */
+#define OKL4_ASM_SHIFT_LADDR_AXON_DATA_INFO (3)
+/*lint -esym(621, OKL4_ASM_PRESHIFT_LADDR_AXON_DATA_INFO) */
+#define OKL4_ASM_PRESHIFT_LADDR_AXON_DATA_INFO (3)
+/*lint -esym(621, OKL4_ASM_WIDTH_LADDR_AXON_DATA_INFO) */
+#define OKL4_ASM_WIDTH_LADDR_AXON_DATA_INFO (61)
+
+
+/**
+ *  okl4_axon_queue_size_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_ALLOC_ORDER_AXON_QUEUE_SIZE) */
+#define OKL4_ASM_MASK_ALLOC_ORDER_AXON_QUEUE_SIZE (31)
+/*lint -esym(621, OKL4_ASM_SHIFT_ALLOC_ORDER_AXON_QUEUE_SIZE) */
+#define OKL4_ASM_SHIFT_ALLOC_ORDER_AXON_QUEUE_SIZE (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_ALLOC_ORDER_AXON_QUEUE_SIZE) */
+#define OKL4_ASM_WIDTH_ALLOC_ORDER_AXON_QUEUE_SIZE (5)
+/*lint -esym(621, OKL4_ASM_MASK_MIN_ORDER_AXON_QUEUE_SIZE) */
+#define OKL4_ASM_MASK_MIN_ORDER_AXON_QUEUE_SIZE (31 << 8)
+/*lint -esym(621, OKL4_ASM_SHIFT_MIN_ORDER_AXON_QUEUE_SIZE) */
+#define OKL4_ASM_SHIFT_MIN_ORDER_AXON_QUEUE_SIZE (8)
+/*lint -esym(621, OKL4_ASM_WIDTH_MIN_ORDER_AXON_QUEUE_SIZE) */
+#define OKL4_ASM_WIDTH_MIN_ORDER_AXON_QUEUE_SIZE (5)
+
+
+/**
+ *  okl4_axon_virq_flags_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_READY_AXON_VIRQ_FLAGS) */
+#define OKL4_ASM_MASK_READY_AXON_VIRQ_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_SHIFT_READY_AXON_VIRQ_FLAGS) */
+#define OKL4_ASM_SHIFT_READY_AXON_VIRQ_FLAGS (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_READY_AXON_VIRQ_FLAGS) */
+#define OKL4_ASM_WIDTH_READY_AXON_VIRQ_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_MASK_FAULT_AXON_VIRQ_FLAGS) */
+#define OKL4_ASM_MASK_FAULT_AXON_VIRQ_FLAGS (1 << 1)
+/*lint -esym(621, OKL4_ASM_SHIFT_FAULT_AXON_VIRQ_FLAGS) */
+#define OKL4_ASM_SHIFT_FAULT_AXON_VIRQ_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_WIDTH_FAULT_AXON_VIRQ_FLAGS) */
+#define OKL4_ASM_WIDTH_FAULT_AXON_VIRQ_FLAGS (1)
+
+
+/**
+ *  okl4_page_cache_t
+ **/
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_WRITECOMBINE) */
+#define OKL4_ASM_PAGE_CACHE_WRITECOMBINE (0x0)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_DEFAULT) */
+#define OKL4_ASM_PAGE_CACHE_DEFAULT (0x1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_IPC_RX) */
+#define OKL4_ASM_PAGE_CACHE_IPC_RX (0x1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_IPC_TX) */
+#define OKL4_ASM_PAGE_CACHE_IPC_TX (0x1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_TRACEBUFFER) */
+#define OKL4_ASM_PAGE_CACHE_TRACEBUFFER (0x1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_WRITEBACK) */
+#define OKL4_ASM_PAGE_CACHE_WRITEBACK (0x1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_IWB_RWA_ONC) */
+#define OKL4_ASM_PAGE_CACHE_IWB_RWA_ONC (0x2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_WRITETHROUGH) */
+#define OKL4_ASM_PAGE_CACHE_WRITETHROUGH (0x3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_DEVICE_GRE) */
+#define OKL4_ASM_PAGE_CACHE_DEVICE_GRE (0x4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_DEVICE_NGRE) */
+#define OKL4_ASM_PAGE_CACHE_DEVICE_NGRE (0x5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_DEVICE) */
+#define OKL4_ASM_PAGE_CACHE_DEVICE (0x6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_STRONG) */
+#define OKL4_ASM_PAGE_CACHE_STRONG (0x7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_DEVICE_NGNRNE) */
+#define OKL4_ASM_PAGE_CACHE_HW_DEVICE_NGNRNE (0x8000000)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_MASK) */
+#define OKL4_ASM_PAGE_CACHE_HW_MASK (0x8000000)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_DEVICE_NGNRE) */
+#define OKL4_ASM_PAGE_CACHE_HW_DEVICE_NGNRE (0x8000004)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_DEVICE_NGRE) */
+#define OKL4_ASM_PAGE_CACHE_HW_DEVICE_NGRE (0x8000008)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_DEVICE_GRE) */
+#define OKL4_ASM_PAGE_CACHE_HW_DEVICE_GRE (0x800000c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWT_WA_NSH (0x8000011)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_NSH (0x8000012)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_NSH (0x8000013)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_WA_NSH (0x8000014)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_NSH (0x8000015)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_NSH (0x8000016)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_NSH (0x8000017)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_WA_NSH (0x8000018)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_WA_NSH (0x8000019)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_WA_NSH (0x800001a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_NSH (0x800001b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_WA_NSH (0x800001c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_WA_NSH (0x800001d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_WA_NSH (0x800001e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_NSH (0x800001f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_NSH (0x8000021)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWT_RA_NSH (0x8000022)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_NSH (0x8000023)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_RA_NSH (0x8000024)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_NSH (0x8000025)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_NSH (0x8000026)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_NSH (0x8000027)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_RA_NSH (0x8000028)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_RA_NSH (0x8000029)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_RA_NSH (0x800002a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_NSH (0x800002b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_RA_NSH (0x800002c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_RA_NSH (0x800002d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_RA_NSH (0x800002e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_NSH (0x800002f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_NSH (0x8000031)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_NSH (0x8000032)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWT_RWA_NSH (0x8000033)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_RWA_NSH (0x8000034)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_NSH (0x8000035)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_NSH (0x8000036)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_NSH (0x8000037)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_NSH (0x8000038)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_NSH (0x8000039)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_NSH (0x800003a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_NSH (0x800003b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_NSH (0x800003c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_NSH (0x800003d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_NSH (0x800003e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_NSH (0x800003f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_ONC_NSH (0x8000041)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_ONC_NSH (0x8000042)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_ONC_NSH (0x8000043)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_NC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_NC_NSH (0x8000044)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_ONC_NSH (0x8000045)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_ONC_NSH (0x8000046)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_ONC_NSH (0x8000047)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_ONC_NSH (0x8000048)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_ONC_NSH (0x8000049)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_ONC_NSH (0x800004a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_ONC_NSH (0x800004b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_ONC_NSH (0x800004c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_ONC_NSH (0x800004d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_ONC_NSH (0x800004e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_ONC_NSH (0x800004f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_NSH (0x8000051)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_NSH (0x8000052)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_NSH (0x8000053)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_WA_NSH (0x8000054)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWB_WA_NSH (0x8000055)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_NSH (0x8000056)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_NSH (0x8000057)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_WA_NSH (0x8000058)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_WA_NSH (0x8000059)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_WA_NSH (0x800005a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_NSH (0x800005b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_WA_NSH (0x800005c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_WA_NSH (0x800005d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_WA_NSH (0x800005e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_NSH (0x800005f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_NSH (0x8000061)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_NSH (0x8000062)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_NSH (0x8000063)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_RA_NSH (0x8000064)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_NSH (0x8000065)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWB_RA_NSH (0x8000066)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_NSH (0x8000067)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_RA_NSH (0x8000068)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_RA_NSH (0x8000069)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_RA_NSH (0x800006a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_NSH (0x800006b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_RA_NSH (0x800006c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_RA_NSH (0x800006d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_RA_NSH (0x800006e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_NSH (0x800006f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_NSH (0x8000071)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_NSH (0x8000072)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_NSH (0x8000073)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_RWA_NSH (0x8000074)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_NSH (0x8000075)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_NSH (0x8000076)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWB_RWA_NSH (0x8000077)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_NSH (0x8000078)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_NSH (0x8000079)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_NSH (0x800007a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_NSH (0x800007b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_NSH (0x800007c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_NSH (0x800007d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_NSH (0x800007e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_NSH (0x800007f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_NA_NSH (0x8000081)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_NA_NSH (0x8000082)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_NSH (0x8000083)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWT_NA_NSH (0x8000084)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_NA_NSH (0x8000085)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_NA_NSH (0x8000086)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_NSH (0x8000087)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WT_NA_NSH (0x8000088)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_NA_NSH (0x8000089)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_NA_NSH (0x800008a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_NA_NSH (0x800008b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_NA_NSH (0x800008c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_NA_NSH (0x800008d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_NA_NSH (0x800008e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_NA_NSH (0x800008f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_WA_NSH (0x8000091)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_WA_NSH (0x8000092)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_NSH (0x8000093)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWT_WA_NSH (0x8000094)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_WA_NSH (0x8000095)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_WA_NSH (0x8000096)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_NSH (0x8000097)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_WA_NSH (0x8000098)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WT_WA_NSH (0x8000099)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_WA_NSH (0x800009a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_WA_NSH (0x800009b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_WA_NSH (0x800009c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_WA_NSH (0x800009d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_WA_NSH (0x800009e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_WA_NSH (0x800009f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_RA_NSH (0x80000a1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_RA_NSH (0x80000a2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_NSH (0x80000a3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWT_RA_NSH (0x80000a4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_RA_NSH (0x80000a5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_RA_NSH (0x80000a6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_NSH (0x80000a7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_RA_NSH (0x80000a8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_RA_NSH (0x80000a9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WT_RA_NSH (0x80000aa)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_RA_NSH (0x80000ab)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_RA_NSH (0x80000ac)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_RA_NSH (0x80000ad)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_RA_NSH (0x80000ae)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_RA_NSH (0x80000af)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_NSH (0x80000b1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_NSH (0x80000b2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_NSH (0x80000b3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWT_RWA_NSH (0x80000b4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_NSH (0x80000b5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_NSH (0x80000b6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_NSH (0x80000b7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_RWA_NSH (0x80000b8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_RWA_NSH (0x80000b9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_RWA_NSH (0x80000ba)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WT_RWA_NSH (0x80000bb)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_RWA_NSH (0x80000bc)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_RWA_NSH (0x80000bd)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_RWA_NSH (0x80000be)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_NSH (0x80000bf)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_NA_NSH (0x80000c1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_NA_NSH (0x80000c2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_NSH (0x80000c3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWB_NA_NSH (0x80000c4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_NA_NSH (0x80000c5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_NA_NSH (0x80000c6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_NSH (0x80000c7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_NA_NSH (0x80000c8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_NA_NSH (0x80000c9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_NA_NSH (0x80000ca)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_NA_NSH (0x80000cb)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WB_NA_NSH (0x80000cc)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_NA_NSH (0x80000cd)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_NA_NSH (0x80000ce)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_NA_NSH (0x80000cf)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_WA_NSH (0x80000d1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_WA_NSH (0x80000d2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_NSH (0x80000d3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWB_WA_NSH (0x80000d4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_WA_NSH (0x80000d5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_WA_NSH (0x80000d6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_NSH (0x80000d7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_WA_NSH (0x80000d8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_WA_NSH (0x80000d9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_WA_NSH (0x80000da)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_WA_NSH (0x80000db)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_WA_NSH (0x80000dc)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WB_WA_NSH (0x80000dd)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_WA_NSH (0x80000de)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_WA_NSH (0x80000df)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_RA_NSH (0x80000e1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_RA_NSH (0x80000e2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_NSH (0x80000e3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWB_RA_NSH (0x80000e4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_RA_NSH (0x80000e5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_RA_NSH (0x80000e6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_NSH (0x80000e7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_RA_NSH (0x80000e8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_RA_NSH (0x80000e9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_RA_NSH (0x80000ea)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_RA_NSH (0x80000eb)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_RA_NSH (0x80000ec)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_RA_NSH (0x80000ed)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WB_RA_NSH (0x80000ee)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_RA_NSH (0x80000ef)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_NSH (0x80000f1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_NSH (0x80000f2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_NSH (0x80000f3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWB_RWA_NSH (0x80000f4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_NSH (0x80000f5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_NSH (0x80000f6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_NSH (0x80000f7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_RWA_NSH (0x80000f8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_RWA_NSH (0x80000f9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_RWA_NSH (0x80000fa)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_NSH (0x80000fb)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_RWA_NSH (0x80000fc)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_RWA_NSH (0x80000fd)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_RWA_NSH (0x80000fe)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WB_RWA_NSH (0x80000ff)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWT_WA_OSH (0x8000211)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_OSH (0x8000212)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_OSH (0x8000213)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_WA_OSH (0x8000214)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_OSH (0x8000215)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_OSH (0x8000216)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_OSH (0x8000217)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_WA_OSH (0x8000218)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_WA_OSH (0x8000219)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_WA_OSH (0x800021a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_OSH (0x800021b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_WA_OSH (0x800021c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_WA_OSH (0x800021d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_WA_OSH (0x800021e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_OSH (0x800021f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_OSH (0x8000221)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWT_RA_OSH (0x8000222)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_OSH (0x8000223)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_RA_OSH (0x8000224)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_OSH (0x8000225)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_OSH (0x8000226)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_OSH (0x8000227)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_RA_OSH (0x8000228)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_RA_OSH (0x8000229)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_RA_OSH (0x800022a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_OSH (0x800022b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_RA_OSH (0x800022c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_RA_OSH (0x800022d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_RA_OSH (0x800022e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_OSH (0x800022f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_OSH (0x8000231)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_OSH (0x8000232)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWT_RWA_OSH (0x8000233)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_RWA_OSH (0x8000234)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_OSH (0x8000235)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_OSH (0x8000236)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_OSH (0x8000237)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_OSH (0x8000238)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_OSH (0x8000239)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_OSH (0x800023a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_OSH (0x800023b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_OSH (0x800023c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_OSH (0x800023d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_OSH (0x800023e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_OSH (0x800023f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_ONC_OSH (0x8000241)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_ONC_OSH (0x8000242)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_ONC_OSH (0x8000243)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_NC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_NC_OSH (0x8000244)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_ONC_OSH (0x8000245)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_ONC_OSH (0x8000246)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_ONC_OSH (0x8000247)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_ONC_OSH (0x8000248)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_ONC_OSH (0x8000249)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_ONC_OSH (0x800024a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_ONC_OSH (0x800024b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_ONC_OSH (0x800024c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_ONC_OSH (0x800024d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_ONC_OSH (0x800024e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_ONC_OSH (0x800024f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_OSH (0x8000251)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_OSH (0x8000252)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_OSH (0x8000253)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_WA_OSH (0x8000254)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWB_WA_OSH (0x8000255)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_OSH (0x8000256)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_OSH (0x8000257)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_WA_OSH (0x8000258)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_WA_OSH (0x8000259)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_WA_OSH (0x800025a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_OSH (0x800025b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_WA_OSH (0x800025c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_WA_OSH (0x800025d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_WA_OSH (0x800025e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_OSH (0x800025f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_OSH (0x8000261)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_OSH (0x8000262)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_OSH (0x8000263)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_RA_OSH (0x8000264)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_OSH (0x8000265)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWB_RA_OSH (0x8000266)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_OSH (0x8000267)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_RA_OSH (0x8000268)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_RA_OSH (0x8000269)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_RA_OSH (0x800026a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_OSH (0x800026b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_RA_OSH (0x800026c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_RA_OSH (0x800026d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_RA_OSH (0x800026e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_OSH (0x800026f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_OSH (0x8000271)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_OSH (0x8000272)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_OSH (0x8000273)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_RWA_OSH (0x8000274)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_OSH (0x8000275)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_OSH (0x8000276)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWB_RWA_OSH (0x8000277)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_OSH (0x8000278)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_OSH (0x8000279)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_OSH (0x800027a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_OSH (0x800027b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_OSH (0x800027c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_OSH (0x800027d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_OSH (0x800027e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_OSH (0x800027f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_NA_OSH (0x8000281)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_NA_OSH (0x8000282)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_OSH (0x8000283)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWT_NA_OSH (0x8000284)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_NA_OSH (0x8000285)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_NA_OSH (0x8000286)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_OSH (0x8000287)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WT_NA_OSH (0x8000288)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_NA_OSH (0x8000289)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_NA_OSH (0x800028a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_NA_OSH (0x800028b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_NA_OSH (0x800028c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_NA_OSH (0x800028d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_NA_OSH (0x800028e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_NA_OSH (0x800028f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_WA_OSH (0x8000291)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_WA_OSH (0x8000292)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_OSH (0x8000293)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWT_WA_OSH (0x8000294)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_WA_OSH (0x8000295)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_WA_OSH (0x8000296)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_OSH (0x8000297)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_WA_OSH (0x8000298)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WT_WA_OSH (0x8000299)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_WA_OSH (0x800029a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_WA_OSH (0x800029b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_WA_OSH (0x800029c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_WA_OSH (0x800029d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_WA_OSH (0x800029e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_WA_OSH (0x800029f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_RA_OSH (0x80002a1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_RA_OSH (0x80002a2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_OSH (0x80002a3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWT_RA_OSH (0x80002a4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_RA_OSH (0x80002a5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_RA_OSH (0x80002a6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_OSH (0x80002a7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_RA_OSH (0x80002a8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_RA_OSH (0x80002a9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WT_RA_OSH (0x80002aa)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_RA_OSH (0x80002ab)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_RA_OSH (0x80002ac)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_RA_OSH (0x80002ad)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_RA_OSH (0x80002ae)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_RA_OSH (0x80002af)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_OSH (0x80002b1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_OSH (0x80002b2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_OSH (0x80002b3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWT_RWA_OSH (0x80002b4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_OSH (0x80002b5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_OSH (0x80002b6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_OSH (0x80002b7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_RWA_OSH (0x80002b8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_RWA_OSH (0x80002b9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_RWA_OSH (0x80002ba)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WT_RWA_OSH (0x80002bb)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_RWA_OSH (0x80002bc)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_RWA_OSH (0x80002bd)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_RWA_OSH (0x80002be)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_OSH (0x80002bf)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_NA_OSH (0x80002c1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_NA_OSH (0x80002c2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_OSH (0x80002c3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWB_NA_OSH (0x80002c4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_NA_OSH (0x80002c5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_NA_OSH (0x80002c6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_OSH (0x80002c7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_NA_OSH (0x80002c8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_NA_OSH (0x80002c9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_NA_OSH (0x80002ca)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_NA_OSH (0x80002cb)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WB_NA_OSH (0x80002cc)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_NA_OSH (0x80002cd)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_NA_OSH (0x80002ce)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_NA_OSH (0x80002cf)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_WA_OSH (0x80002d1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_WA_OSH (0x80002d2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_OSH (0x80002d3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWB_WA_OSH (0x80002d4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_WA_OSH (0x80002d5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_WA_OSH (0x80002d6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_OSH (0x80002d7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_WA_OSH (0x80002d8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_WA_OSH (0x80002d9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_WA_OSH (0x80002da)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_WA_OSH (0x80002db)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_WA_OSH (0x80002dc)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WB_WA_OSH (0x80002dd)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_WA_OSH (0x80002de)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_WA_OSH (0x80002df)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_RA_OSH (0x80002e1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_RA_OSH (0x80002e2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_OSH (0x80002e3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWB_RA_OSH (0x80002e4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_RA_OSH (0x80002e5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_RA_OSH (0x80002e6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_OSH (0x80002e7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_RA_OSH (0x80002e8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_RA_OSH (0x80002e9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_RA_OSH (0x80002ea)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_RA_OSH (0x80002eb)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_RA_OSH (0x80002ec)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_RA_OSH (0x80002ed)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WB_RA_OSH (0x80002ee)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_RA_OSH (0x80002ef)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_OSH (0x80002f1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_OSH (0x80002f2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_OSH (0x80002f3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWB_RWA_OSH (0x80002f4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_OSH (0x80002f5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_OSH (0x80002f6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_OSH (0x80002f7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_RWA_OSH (0x80002f8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_RWA_OSH (0x80002f9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_RWA_OSH (0x80002fa)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_OSH (0x80002fb)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_RWA_OSH (0x80002fc)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_RWA_OSH (0x80002fd)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_RWA_OSH (0x80002fe)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WB_RWA_OSH (0x80002ff)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWT_WA_ISH (0x8000311)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_ISH (0x8000312)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_ISH (0x8000313)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_WA_ISH (0x8000314)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_ISH (0x8000315)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_ISH (0x8000316)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_ISH (0x8000317)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_WA_ISH (0x8000318)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_WA_ISH (0x8000319)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_WA_ISH (0x800031a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_ISH (0x800031b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_WA_ISH (0x800031c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_WA_ISH (0x800031d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_WA_ISH (0x800031e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_ISH (0x800031f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_ISH (0x8000321)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWT_RA_ISH (0x8000322)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_ISH (0x8000323)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_RA_ISH (0x8000324)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_ISH (0x8000325)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_ISH (0x8000326)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_ISH (0x8000327)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_RA_ISH (0x8000328)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_RA_ISH (0x8000329)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_RA_ISH (0x800032a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_ISH (0x800032b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_RA_ISH (0x800032c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_RA_ISH (0x800032d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_RA_ISH (0x800032e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_ISH (0x800032f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_ISH (0x8000331)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_ISH (0x8000332)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWT_RWA_ISH (0x8000333)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_RWA_ISH (0x8000334)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_ISH (0x8000335)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_ISH (0x8000336)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_ISH (0x8000337)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_ISH (0x8000338)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_ISH (0x8000339)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_ISH (0x800033a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_ISH (0x800033b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_ISH (0x800033c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_ISH (0x800033d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_ISH (0x800033e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_ISH (0x800033f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_ONC_ISH (0x8000341)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_ONC_ISH (0x8000342)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_ONC_ISH (0x8000343)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_NC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_NC_ISH (0x8000344)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_ONC_ISH (0x8000345)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_ONC_ISH (0x8000346)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_ONC_ISH (0x8000347)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_ONC_ISH (0x8000348)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_ONC_ISH (0x8000349)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_ONC_ISH (0x800034a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_ONC_ISH (0x800034b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_ONC_ISH (0x800034c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_ONC_ISH (0x800034d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_ONC_ISH (0x800034e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_ONC_ISH (0x800034f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_ISH (0x8000351)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_ISH (0x8000352)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_ISH (0x8000353)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_WA_ISH (0x8000354)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWB_WA_ISH (0x8000355)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_ISH (0x8000356)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_ISH (0x8000357)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_WA_ISH (0x8000358)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_WA_ISH (0x8000359)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_WA_ISH (0x800035a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_ISH (0x800035b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_WA_ISH (0x800035c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_WA_ISH (0x800035d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_WA_ISH (0x800035e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_ISH (0x800035f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_ISH (0x8000361)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_ISH (0x8000362)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_ISH (0x8000363)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_RA_ISH (0x8000364)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_ISH (0x8000365)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWB_RA_ISH (0x8000366)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_ISH (0x8000367)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_RA_ISH (0x8000368)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_RA_ISH (0x8000369)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_RA_ISH (0x800036a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_ISH (0x800036b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_RA_ISH (0x800036c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_RA_ISH (0x800036d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_RA_ISH (0x800036e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_ISH (0x800036f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_ISH (0x8000371)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_ISH (0x8000372)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_ISH (0x8000373)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_RWA_ISH (0x8000374)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_ISH (0x8000375)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_ISH (0x8000376)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWB_RWA_ISH (0x8000377)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_ISH (0x8000378)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_ISH (0x8000379)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_ISH (0x800037a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_ISH (0x800037b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_ISH (0x800037c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_ISH (0x800037d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_ISH (0x800037e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_ISH (0x800037f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_NA_ISH (0x8000381)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_NA_ISH (0x8000382)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_ISH (0x8000383)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWT_NA_ISH (0x8000384)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_NA_ISH (0x8000385)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_NA_ISH (0x8000386)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_ISH (0x8000387)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WT_NA_ISH (0x8000388)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_NA_ISH (0x8000389)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_NA_ISH (0x800038a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_NA_ISH (0x800038b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_NA_ISH (0x800038c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_NA_ISH (0x800038d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_NA_ISH (0x800038e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_NA_ISH (0x800038f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_WA_ISH (0x8000391)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_WA_ISH (0x8000392)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_ISH (0x8000393)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWT_WA_ISH (0x8000394)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_WA_ISH (0x8000395)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_WA_ISH (0x8000396)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_ISH (0x8000397)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_WA_ISH (0x8000398)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WT_WA_ISH (0x8000399)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_WA_ISH (0x800039a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_WA_ISH (0x800039b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_WA_ISH (0x800039c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_WA_ISH (0x800039d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_WA_ISH (0x800039e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_WA_ISH (0x800039f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_RA_ISH (0x80003a1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_RA_ISH (0x80003a2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_ISH (0x80003a3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWT_RA_ISH (0x80003a4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_RA_ISH (0x80003a5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_RA_ISH (0x80003a6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_ISH (0x80003a7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_RA_ISH (0x80003a8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_RA_ISH (0x80003a9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WT_RA_ISH (0x80003aa)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_RA_ISH (0x80003ab)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_RA_ISH (0x80003ac)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_RA_ISH (0x80003ad)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_RA_ISH (0x80003ae)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_RA_ISH (0x80003af)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_ISH (0x80003b1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_ISH (0x80003b2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_ISH (0x80003b3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWT_RWA_ISH (0x80003b4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_ISH (0x80003b5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_ISH (0x80003b6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_ISH (0x80003b7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_RWA_ISH (0x80003b8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_RWA_ISH (0x80003b9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_RWA_ISH (0x80003ba)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WT_RWA_ISH (0x80003bb)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_RWA_ISH (0x80003bc)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_RWA_ISH (0x80003bd)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_RWA_ISH (0x80003be)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_ISH (0x80003bf)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_NA_ISH (0x80003c1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_NA_ISH (0x80003c2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_ISH (0x80003c3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWB_NA_ISH (0x80003c4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_NA_ISH (0x80003c5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_NA_ISH (0x80003c6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_ISH (0x80003c7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_NA_ISH (0x80003c8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_NA_ISH (0x80003c9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_NA_ISH (0x80003ca)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_NA_ISH (0x80003cb)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WB_NA_ISH (0x80003cc)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_NA_ISH (0x80003cd)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_NA_ISH (0x80003ce)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_NA_ISH (0x80003cf)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_WA_ISH (0x80003d1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_WA_ISH (0x80003d2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_ISH (0x80003d3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWB_WA_ISH (0x80003d4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_WA_ISH (0x80003d5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_WA_ISH (0x80003d6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_ISH (0x80003d7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_WA_ISH (0x80003d8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_WA_ISH (0x80003d9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_WA_ISH (0x80003da)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_WA_ISH (0x80003db)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_WA_ISH (0x80003dc)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WB_WA_ISH (0x80003dd)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_WA_ISH (0x80003de)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_WA_ISH (0x80003df)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_RA_ISH (0x80003e1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_RA_ISH (0x80003e2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_ISH (0x80003e3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWB_RA_ISH (0x80003e4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_RA_ISH (0x80003e5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_RA_ISH (0x80003e6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_ISH (0x80003e7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_RA_ISH (0x80003e8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_RA_ISH (0x80003e9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_RA_ISH (0x80003ea)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_RA_ISH (0x80003eb)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_RA_ISH (0x80003ec)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_RA_ISH (0x80003ed)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WB_RA_ISH (0x80003ee)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_RA_ISH (0x80003ef)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_ISH (0x80003f1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_ISH (0x80003f2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_ISH (0x80003f3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWB_RWA_ISH (0x80003f4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_ISH (0x80003f5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_ISH (0x80003f6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_ISH (0x80003f7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_RWA_ISH (0x80003f8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_RWA_ISH (0x80003f9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_RWA_ISH (0x80003fa)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_ISH (0x80003fb)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_RWA_ISH (0x80003fc)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_RWA_ISH (0x80003fd)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_RWA_ISH (0x80003fe)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WB_RWA_ISH (0x80003ff)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_MAX) */
+#define OKL4_ASM_PAGE_CACHE_MAX (0x80003ff)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_INVALID) */
+#define OKL4_ASM_PAGE_CACHE_INVALID (0xffffffff)
+
+/**
+ *  okl4_cpu_exec_mode
+ **/
+/*lint -esym(621, OKL4_ARM_MODE) */
+#define OKL4_ARM_MODE (0)
+
+/*lint -esym(621, OKL4_DEFAULT_MODE) */
+#define OKL4_DEFAULT_MODE (4)
+
+/*lint -esym(621, OKL4_JAZELLE_MODE) */
+#define OKL4_JAZELLE_MODE (2)
+
+/*lint -esym(621, OKL4_THUMBEE_MODE) */
+#define OKL4_THUMBEE_MODE (3)
+
+/*lint -esym(621, OKL4_THUMB_MODE) */
+#define OKL4_THUMB_MODE (1)
+
+/**
+ *  okl4_cpu_mode_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_EXEC_MODE_CPU_MODE) */
+#define OKL4_ASM_MASK_EXEC_MODE_CPU_MODE (7)
+/*lint -esym(621, OKL4_ASM_SHIFT_EXEC_MODE_CPU_MODE) */
+#define OKL4_ASM_SHIFT_EXEC_MODE_CPU_MODE (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_EXEC_MODE_CPU_MODE) */
+#define OKL4_ASM_WIDTH_EXEC_MODE_CPU_MODE (3)
+/*lint -esym(621, OKL4_ASM_MASK_ENDIAN_CPU_MODE) */
+#define OKL4_ASM_MASK_ENDIAN_CPU_MODE (1 << 7)
+/*lint -esym(621, OKL4_ASM_SHIFT_ENDIAN_CPU_MODE) */
+#define OKL4_ASM_SHIFT_ENDIAN_CPU_MODE (7)
+/*lint -esym(621, OKL4_ASM_WIDTH_ENDIAN_CPU_MODE) */
+#define OKL4_ASM_WIDTH_ENDIAN_CPU_MODE (1)
+
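+/*
+ * Minimal illustrative sketch (not part of the generated interface): a CPU
+ * mode word packs the execution mode in bits [2:0] and the endianness flag
+ * in bit 7, per the MASK/SHIFT/WIDTH constants above. For example, a
+ * big-endian Thumb mode value could be formed as
+ *
+ *   mode = (OKL4_THUMB_MODE << OKL4_ASM_SHIFT_EXEC_MODE_CPU_MODE) |
+ *          (1 << OKL4_ASM_SHIFT_ENDIAN_CPU_MODE);
+ *
+ * assuming a set endian bit denotes big-endian.
+ */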
+
+/**
+ *  okl4_page_perms_t
+ **/
+/*lint -esym(621, OKL4_ASM_PAGE_PERMS_NONE) */
+#define OKL4_ASM_PAGE_PERMS_NONE (0x0)
+/*lint -esym(621, OKL4_ASM_PAGE_PERMS_X) */
+#define OKL4_ASM_PAGE_PERMS_X (0x1)
+/*lint -esym(621, OKL4_ASM_PAGE_PERMS_W) */
+#define OKL4_ASM_PAGE_PERMS_W (0x2)
+/*lint -esym(621, OKL4_ASM_PAGE_PERMS_WX) */
+#define OKL4_ASM_PAGE_PERMS_WX (0x3)
+/*lint -esym(621, OKL4_ASM_PAGE_PERMS_R) */
+#define OKL4_ASM_PAGE_PERMS_R (0x4)
+/*lint -esym(621, OKL4_ASM_PAGE_PERMS_RX) */
+#define OKL4_ASM_PAGE_PERMS_RX (0x5)
+/*lint -esym(621, OKL4_ASM_PAGE_PERMS_RW) */
+#define OKL4_ASM_PAGE_PERMS_RW (0x6)
+/*lint -esym(621, OKL4_ASM_PAGE_PERMS_RWX) */
+#define OKL4_ASM_PAGE_PERMS_RWX (0x7)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_PAGE_PERMS_MAX) */
+#define OKL4_ASM_PAGE_PERMS_MAX (0x7)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_PAGE_PERMS_INVALID) */
+#define OKL4_ASM_PAGE_PERMS_INVALID (0xffffffff)
+
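+/*
+ * Minimal illustrative sketch (not part of the generated interface): the
+ * permission values above are bit flags, so an individual right can be
+ * tested with a bitwise AND, e.g.
+ *
+ *   if (perms & OKL4_ASM_PAGE_PERMS_W) { ... mapping is writable ... }
+ */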
+/**
+ *  okl4_error_t
+ **/
+/**
+    KSP returned OK
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_OK) */
+#define OKL4_ASM_ERROR_KSP_OK (0x0)
+/**
+    The operation succeeded
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_OK) */
+#define OKL4_ASM_ERROR_OK (0x0)
+/**
+    The target vCPU was already running.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_ALREADY_STARTED) */
+#define OKL4_ASM_ERROR_ALREADY_STARTED (0x1)
+/**
+    The target vCPU was not running.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_ALREADY_STOPPED) */
+#define OKL4_ASM_ERROR_ALREADY_STOPPED (0x2)
+/*lint -esym(621, OKL4_ASM_ERROR_AXON_AREA_TOO_BIG) */
+#define OKL4_ASM_ERROR_AXON_AREA_TOO_BIG (0x3)
+/*lint -esym(621, OKL4_ASM_ERROR_AXON_BAD_MESSAGE_SIZE) */
+#define OKL4_ASM_ERROR_AXON_BAD_MESSAGE_SIZE (0x4)
+/*lint -esym(621, OKL4_ASM_ERROR_AXON_INVALID_OFFSET) */
+#define OKL4_ASM_ERROR_AXON_INVALID_OFFSET (0x5)
+/*lint -esym(621, OKL4_ASM_ERROR_AXON_QUEUE_NOT_MAPPED) */
+#define OKL4_ASM_ERROR_AXON_QUEUE_NOT_MAPPED (0x6)
+/*lint -esym(621, OKL4_ASM_ERROR_AXON_QUEUE_NOT_READY) */
+#define OKL4_ASM_ERROR_AXON_QUEUE_NOT_READY (0x7)
+/*lint -esym(621, OKL4_ASM_ERROR_AXON_TRANSFER_LIMIT_EXCEEDED) */
+#define OKL4_ASM_ERROR_AXON_TRANSFER_LIMIT_EXCEEDED (0x8)
+/**
+    A blocking operation was cancelled due to an abort of the operation.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_CANCELLED) */
+#define OKL4_ASM_ERROR_CANCELLED (0x9)
+/**
+    The operation failed due to an existing mapping.  Mapping
+    operations must not overlap an existing mapping.  Unmapping
+    must be performed at the same size as the original mapping.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_EXISTING_MAPPING) */
+#define OKL4_ASM_ERROR_EXISTING_MAPPING (0xa)
+/**
+    The operation requested with a segment failed due to
+    insufficient rights in the segment.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_INSUFFICIENT_SEGMENT_RIGHTS) */
+#define OKL4_ASM_ERROR_INSUFFICIENT_SEGMENT_RIGHTS (0xb)
+/**
+    The operation did not complete because it was interrupted by a
+    preemption.  This error value is only used internally.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_INTERRUPTED) */
+#define OKL4_ASM_ERROR_INTERRUPTED (0xc)
+/**
+    Attempt to attach an interrupt to an IRQ number, when the
+    interrupt is already attached to an IRQ number
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_INTERRUPT_ALREADY_ATTACHED) */
+#define OKL4_ASM_ERROR_INTERRUPT_ALREADY_ATTACHED (0xd)
+/**
+    Attempt to use an IRQ number that is out of range, of
+    the wrong type, or not in the correct state
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_INTERRUPT_INVALID_IRQ) */
+#define OKL4_ASM_ERROR_INTERRUPT_INVALID_IRQ (0xe)
+/**
+    Attempt to operate on an unknown IRQ number
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_INTERRUPT_NOT_ATTACHED) */
+#define OKL4_ASM_ERROR_INTERRUPT_NOT_ATTACHED (0xf)
+/**
+    An invalid argument was provided.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_INVALID_ARGUMENT) */
+#define OKL4_ASM_ERROR_INVALID_ARGUMENT (0x10)
+/**
+    The operation failed because one of the arguments does not refer to a
+    valid object.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_INVALID_DESIGNATOR) */
+#define OKL4_ASM_ERROR_INVALID_DESIGNATOR (0x11)
+/**
+    The operation failed because the power_state
+    argument is invalid.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_INVALID_POWER_STATE) */
+#define OKL4_ASM_ERROR_INVALID_POWER_STATE (0x12)
+/**
+    The operation failed because the given segment index does
+    not correspond to an attached physical segment.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_INVALID_SEGMENT_INDEX) */
+#define OKL4_ASM_ERROR_INVALID_SEGMENT_INDEX (0x13)
+/**
+    A user provided address produced a read or write fault in the operation.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_MEMORY_FAULT) */
+#define OKL4_ASM_ERROR_MEMORY_FAULT (0x14)
+/**
+    The operation failed because there is no mapping at the
+    specified location.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_MISSING_MAPPING) */
+#define OKL4_ASM_ERROR_MISSING_MAPPING (0x15)
+/**
+    The delete operation failed because the KMMU context is not
+    empty.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_NON_EMPTY_MMU_CONTEXT) */
+#define OKL4_ASM_ERROR_NON_EMPTY_MMU_CONTEXT (0x16)
+/**
+    The lookup operation failed because the given virtual address
+    of the given KMMU context is not mapped at the given physical
+    segment.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_NOT_IN_SEGMENT) */
+#define OKL4_ASM_ERROR_NOT_IN_SEGMENT (0x17)
+/**
+    The operation failed because the caller is not on the last
+    online cpu.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_NOT_LAST_CPU) */
+#define OKL4_ASM_ERROR_NOT_LAST_CPU (0x18)
+/**
+    Insufficient resources are available to perform the operation.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_NO_RESOURCES) */
+#define OKL4_ASM_ERROR_NO_RESOURCES (0x19)
+/**
+    Operation failed because pipe was not in the required state.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_PIPE_BAD_STATE) */
+#define OKL4_ASM_ERROR_PIPE_BAD_STATE (0x1a)
+/**
+    Operation failed because no messages are in the queue.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_PIPE_EMPTY) */
+#define OKL4_ASM_ERROR_PIPE_EMPTY (0x1b)
+/**
+    Operation failed because no memory is available in the queue.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_PIPE_FULL) */
+#define OKL4_ASM_ERROR_PIPE_FULL (0x1c)
+/**
+    Operation failed because the pipe is in reset or not ready.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_PIPE_NOT_READY) */
+#define OKL4_ASM_ERROR_PIPE_NOT_READY (0x1d)
+/**
+    Message was truncated because receive buffer size is too small.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_PIPE_RECV_OVERFLOW) */
+#define OKL4_ASM_ERROR_PIPE_RECV_OVERFLOW (0x1e)
+/**
+    The operation failed because at least one VCPU has a monitored
+    power state and is not currently suspended.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_POWER_VCPU_RESUMED) */
+#define OKL4_ASM_ERROR_POWER_VCPU_RESUMED (0x1f)
+/**
+    The operation requires a segment to be unused, or not attached
+    to an MMU context.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_SEGMENT_USED) */
+#define OKL4_ASM_ERROR_SEGMENT_USED (0x20)
+/*lint -esym(621, OKL4_ASM_ERROR_THREAD_ALREADY_WATCHING_SUSPENDED) */
+#define OKL4_ASM_ERROR_THREAD_ALREADY_WATCHING_SUSPENDED (0x21)
+/**
+    The timer is already active, and was not reprogrammed.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_TIMER_ACTIVE) */
+#define OKL4_ASM_ERROR_TIMER_ACTIVE (0x22)
+/**
+    The timer has already been cancelled or expired.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_TIMER_CANCELLED) */
+#define OKL4_ASM_ERROR_TIMER_CANCELLED (0x23)
+/**
+    Operation failed due to a temporary condition, and may be retried.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_TRY_AGAIN) */
+#define OKL4_ASM_ERROR_TRY_AGAIN (0x24)
+/**
+    The non-blocking operation failed because it would
+    block on a resource.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_WOULD_BLOCK) */
+#define OKL4_ASM_ERROR_WOULD_BLOCK (0x25)
+/**
+    Insufficient resources
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_ALLOC_EXHAUSTED) */
+#define OKL4_ASM_ERROR_ALLOC_EXHAUSTED (0x26)
+/**
+    KSP specific error 0
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_ERROR_0) */
+#define OKL4_ASM_ERROR_KSP_ERROR_0 (0x10000010)
+/**
+    KSP specific error 1
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_ERROR_1) */
+#define OKL4_ASM_ERROR_KSP_ERROR_1 (0x10000011)
+/**
+    KSP specific error 2
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_ERROR_2) */
+#define OKL4_ASM_ERROR_KSP_ERROR_2 (0x10000012)
+/**
+    KSP specific error 3
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_ERROR_3) */
+#define OKL4_ASM_ERROR_KSP_ERROR_3 (0x10000013)
+/**
+    KSP specific error 4
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_ERROR_4) */
+#define OKL4_ASM_ERROR_KSP_ERROR_4 (0x10000014)
+/**
+    KSP specific error 5
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_ERROR_5) */
+#define OKL4_ASM_ERROR_KSP_ERROR_5 (0x10000015)
+/**
+    KSP specific error 6
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_ERROR_6) */
+#define OKL4_ASM_ERROR_KSP_ERROR_6 (0x10000016)
+/**
+    KSP specific error 7
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_ERROR_7) */
+#define OKL4_ASM_ERROR_KSP_ERROR_7 (0x10000017)
+/**
+    Invalid argument to KSP
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_INVALID_ARG) */
+#define OKL4_ASM_ERROR_KSP_INVALID_ARG (0x80000001)
+/**
+    KSP doesn't implement requested feature
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_NOT_IMPLEMENTED) */
+#define OKL4_ASM_ERROR_KSP_NOT_IMPLEMENTED (0x80000002)
+/**
+    User didn't supply rights for requested feature
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_INSUFFICIENT_RIGHTS) */
+#define OKL4_ASM_ERROR_KSP_INSUFFICIENT_RIGHTS (0x80000003)
+/**
+    Interrupt already registered
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_INTERRUPT_REGISTERED) */
+#define OKL4_ASM_ERROR_KSP_INTERRUPT_REGISTERED (0x80000004)
+/**
+    Requested operation is not implemented.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_NOT_IMPLEMENTED) */
+#define OKL4_ASM_ERROR_NOT_IMPLEMENTED (0xffffffff)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_MAX) */
+#define OKL4_ASM_ERROR_MAX (0xffffffff)
+
+/**
+ *  okl4_gicd_icfgr_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_EDGE_GICD_ICFGR) */
+#define OKL4_ASM_MASK_EDGE_GICD_ICFGR (1 << 1)
+/*lint -esym(621, OKL4_ASM_SHIFT_EDGE_GICD_ICFGR) */
+#define OKL4_ASM_SHIFT_EDGE_GICD_ICFGR (1)
+/*lint -esym(621, OKL4_ASM_WIDTH_EDGE_GICD_ICFGR) */
+#define OKL4_ASM_WIDTH_EDGE_GICD_ICFGR (1)
+
+
+/**
+ *  okl4_sgi_target_t
+ **/
+/*lint -esym(621, OKL4_ASM_SGI_TARGET_LISTED) */
+#define OKL4_ASM_SGI_TARGET_LISTED (0x0)
+/*lint -esym(621, OKL4_ASM_SGI_TARGET_ALL_OTHERS) */
+#define OKL4_ASM_SGI_TARGET_ALL_OTHERS (0x1)
+/*lint -esym(621, OKL4_ASM_SGI_TARGET_SELF) */
+#define OKL4_ASM_SGI_TARGET_SELF (0x2)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_SGI_TARGET_MAX) */
+#define OKL4_ASM_SGI_TARGET_MAX (0x2)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_SGI_TARGET_INVALID) */
+#define OKL4_ASM_SGI_TARGET_INVALID (0xffffffff)
+
+/**
+ *  okl4_gicd_sgir_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_SGIINTID_GICD_SGIR) */
+#define OKL4_ASM_MASK_SGIINTID_GICD_SGIR (15)
+/*lint -esym(621, OKL4_ASM_SHIFT_SGIINTID_GICD_SGIR) */
+#define OKL4_ASM_SHIFT_SGIINTID_GICD_SGIR (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_SGIINTID_GICD_SGIR) */
+#define OKL4_ASM_WIDTH_SGIINTID_GICD_SGIR (4)
+/*lint -esym(621, OKL4_ASM_MASK_NSATT_GICD_SGIR) */
+#define OKL4_ASM_MASK_NSATT_GICD_SGIR (1 << 15)
+/*lint -esym(621, OKL4_ASM_SHIFT_NSATT_GICD_SGIR) */
+#define OKL4_ASM_SHIFT_NSATT_GICD_SGIR (15)
+/*lint -esym(621, OKL4_ASM_WIDTH_NSATT_GICD_SGIR) */
+#define OKL4_ASM_WIDTH_NSATT_GICD_SGIR (1)
+/*lint -esym(621, OKL4_ASM_MASK_CPUTARGETLIST_GICD_SGIR) */
+#define OKL4_ASM_MASK_CPUTARGETLIST_GICD_SGIR (255 << 16)
+/*lint -esym(621, OKL4_ASM_SHIFT_CPUTARGETLIST_GICD_SGIR) */
+#define OKL4_ASM_SHIFT_CPUTARGETLIST_GICD_SGIR (16)
+/*lint -esym(621, OKL4_ASM_WIDTH_CPUTARGETLIST_GICD_SGIR) */
+#define OKL4_ASM_WIDTH_CPUTARGETLIST_GICD_SGIR (8)
+/*lint -esym(621, OKL4_ASM_MASK_TARGETLISTFILTER_GICD_SGIR) */
+#define OKL4_ASM_MASK_TARGETLISTFILTER_GICD_SGIR (3 << 24)
+/*lint -esym(621, OKL4_ASM_SHIFT_TARGETLISTFILTER_GICD_SGIR) */
+#define OKL4_ASM_SHIFT_TARGETLISTFILTER_GICD_SGIR (24)
+/*lint -esym(621, OKL4_ASM_WIDTH_TARGETLISTFILTER_GICD_SGIR) */
+#define OKL4_ASM_WIDTH_TARGETLISTFILTER_GICD_SGIR (2)
+
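+/*
+ * Minimal illustrative sketch (assumption, not from the generated source):
+ * a GICD_SGIR value raising SGI 1 on the requesting CPU could be packed
+ * from the field macros above, assuming the okl4_sgi_target_t values encode
+ * the target list filter field:
+ *
+ *   sgir = (1u << OKL4_ASM_SHIFT_SGIINTID_GICD_SGIR) |
+ *          (OKL4_ASM_SGI_TARGET_SELF
+ *               << OKL4_ASM_SHIFT_TARGETLISTFILTER_GICD_SGIR);
+ */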
+
+/**
+ *  okl4_link_role_t
+ **/
+/*lint -esym(621, OKL4_ASM_LINK_ROLE_SYMMETRIC) */
+#define OKL4_ASM_LINK_ROLE_SYMMETRIC (0x0)
+/*lint -esym(621, OKL4_ASM_LINK_ROLE_SERVER) */
+#define OKL4_ASM_LINK_ROLE_SERVER (0x1)
+/*lint -esym(621, OKL4_ASM_LINK_ROLE_CLIENT) */
+#define OKL4_ASM_LINK_ROLE_CLIENT (0x2)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_LINK_ROLE_MAX) */
+#define OKL4_ASM_LINK_ROLE_MAX (0x2)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_LINK_ROLE_INVALID) */
+#define OKL4_ASM_LINK_ROLE_INVALID (0xffffffff)
+
+/**
+ *  okl4_link_transport_type_t
+ **/
+/*lint -esym(621, OKL4_ASM_LINK_TRANSPORT_TYPE_SHARED_BUFFER) */
+#define OKL4_ASM_LINK_TRANSPORT_TYPE_SHARED_BUFFER (0x0)
+/*lint -esym(621, OKL4_ASM_LINK_TRANSPORT_TYPE_AXONS) */
+#define OKL4_ASM_LINK_TRANSPORT_TYPE_AXONS (0x1)
+/*lint -esym(621, OKL4_ASM_LINK_TRANSPORT_TYPE_PIPES) */
+#define OKL4_ASM_LINK_TRANSPORT_TYPE_PIPES (0x2)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_LINK_TRANSPORT_TYPE_MAX) */
+#define OKL4_ASM_LINK_TRANSPORT_TYPE_MAX (0x2)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_LINK_TRANSPORT_TYPE_INVALID) */
+#define OKL4_ASM_LINK_TRANSPORT_TYPE_INVALID (0xffffffff)
+
+/**
+ *  okl4_mmu_lookup_index_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_ERROR_MMU_LOOKUP_INDEX) */
+#define OKL4_ASM_MASK_ERROR_MMU_LOOKUP_INDEX (65535)
+/*lint -esym(621, OKL4_ASM_SHIFT_ERROR_MMU_LOOKUP_INDEX) */
+#define OKL4_ASM_SHIFT_ERROR_MMU_LOOKUP_INDEX (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_ERROR_MMU_LOOKUP_INDEX) */
+#define OKL4_ASM_WIDTH_ERROR_MMU_LOOKUP_INDEX (16)
+/*lint -esym(621, OKL4_ASM_MASK_INDEX_MMU_LOOKUP_INDEX) */
+#define OKL4_ASM_MASK_INDEX_MMU_LOOKUP_INDEX (65535 << 16)
+/*lint -esym(621, OKL4_ASM_SHIFT_INDEX_MMU_LOOKUP_INDEX) */
+#define OKL4_ASM_SHIFT_INDEX_MMU_LOOKUP_INDEX (16)
+/*lint -esym(621, OKL4_ASM_WIDTH_INDEX_MMU_LOOKUP_INDEX) */
+#define OKL4_ASM_WIDTH_INDEX_MMU_LOOKUP_INDEX (16)
+
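+/*
+ * Minimal illustrative sketch (assumption, not from the generated source):
+ * unpacking an okl4_mmu_lookup_index_t word into its error and index halves
+ * with the macros above:
+ *
+ *   error = (w & OKL4_ASM_MASK_ERROR_MMU_LOOKUP_INDEX)
+ *               >> OKL4_ASM_SHIFT_ERROR_MMU_LOOKUP_INDEX;
+ *   index = (w & OKL4_ASM_MASK_INDEX_MMU_LOOKUP_INDEX)
+ *               >> OKL4_ASM_SHIFT_INDEX_MMU_LOOKUP_INDEX;
+ */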
+
+/**
+ *  okl4_mmu_lookup_size_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_SEG_INDEX_MMU_LOOKUP_SIZE) */
+#define OKL4_ASM_MASK_SEG_INDEX_MMU_LOOKUP_SIZE (1023)
+/*lint -esym(621, OKL4_ASM_SHIFT_SEG_INDEX_MMU_LOOKUP_SIZE) */
+#define OKL4_ASM_SHIFT_SEG_INDEX_MMU_LOOKUP_SIZE (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_SEG_INDEX_MMU_LOOKUP_SIZE) */
+#define OKL4_ASM_WIDTH_SEG_INDEX_MMU_LOOKUP_SIZE (10)
+/*lint -esym(621, OKL4_ASM_MASK_SIZE_10_MMU_LOOKUP_SIZE) */
+#define OKL4_ASM_MASK_SIZE_10_MMU_LOOKUP_SIZE (18014398509481983 << 10)
+/*lint -esym(621, OKL4_ASM_SHIFT_SIZE_10_MMU_LOOKUP_SIZE) */
+#define OKL4_ASM_SHIFT_SIZE_10_MMU_LOOKUP_SIZE (10)
+/*lint -esym(621, OKL4_ASM_WIDTH_SIZE_10_MMU_LOOKUP_SIZE) */
+#define OKL4_ASM_WIDTH_SIZE_10_MMU_LOOKUP_SIZE (54)
+
+
+/**
+ *  okl4_nanoseconds_t
+ **/
+/** Timer period upper bound is (1 << 55) ns */
+/*lint -esym(621, OKL4_TIMER_MAX_PERIOD_NS) */
+#define OKL4_TIMER_MAX_PERIOD_NS (36028797018963968)
+
+/** Timer period lower bound is 1000000 ns */
+/*lint -esym(621, OKL4_TIMER_MIN_PERIOD_NS) */
+#define OKL4_TIMER_MIN_PERIOD_NS (1000000)
+
+/**
+ *  _okl4_page_attribute_t
+ **/
+
+
+/*lint -esym(621, _OKL4_ASM_MASK_RWX_PAGE_ATTRIBUTE) */
+#define _OKL4_ASM_MASK_RWX_PAGE_ATTRIBUTE (7)
+/*lint -esym(621, _OKL4_ASM_SHIFT_RWX_PAGE_ATTRIBUTE) */
+#define _OKL4_ASM_SHIFT_RWX_PAGE_ATTRIBUTE (0)
+/*lint -esym(621, _OKL4_ASM_WIDTH_RWX_PAGE_ATTRIBUTE) */
+#define _OKL4_ASM_WIDTH_RWX_PAGE_ATTRIBUTE (3)
+/*lint -esym(621, _OKL4_ASM_MASK_ATTRIB_PAGE_ATTRIBUTE) */
+#define _OKL4_ASM_MASK_ATTRIB_PAGE_ATTRIBUTE (268435455 << 4)
+/*lint -esym(621, _OKL4_ASM_SHIFT_ATTRIB_PAGE_ATTRIBUTE) */
+#define _OKL4_ASM_SHIFT_ATTRIB_PAGE_ATTRIBUTE (4)
+/*lint -esym(621, _OKL4_ASM_WIDTH_ATTRIB_PAGE_ATTRIBUTE) */
+#define _OKL4_ASM_WIDTH_ATTRIB_PAGE_ATTRIBUTE (28)
+
+
+/**
+ *  okl4_pipe_control_t
+ **/
+
+/*lint -esym(621, OKL4_PIPE_CONTROL_OP_CLR_HALTED) */
+#define OKL4_PIPE_CONTROL_OP_CLR_HALTED (4)
+/*lint -esym(621, OKL4_PIPE_CONTROL_OP_RESET) */
+#define OKL4_PIPE_CONTROL_OP_RESET (0)
+/*lint -esym(621, OKL4_PIPE_CONTROL_OP_SET_HALTED) */
+#define OKL4_PIPE_CONTROL_OP_SET_HALTED (3)
+/*lint -esym(621, OKL4_PIPE_CONTROL_OP_SET_RX_READY) */
+#define OKL4_PIPE_CONTROL_OP_SET_RX_READY (2)
+/*lint -esym(621, OKL4_PIPE_CONTROL_OP_SET_TX_READY) */
+#define OKL4_PIPE_CONTROL_OP_SET_TX_READY (1)
+
+/*lint -esym(621, OKL4_ASM_MASK_DO_OP_PIPE_CONTROL) */
+#define OKL4_ASM_MASK_DO_OP_PIPE_CONTROL (1)
+/*lint -esym(621, OKL4_ASM_SHIFT_DO_OP_PIPE_CONTROL) */
+#define OKL4_ASM_SHIFT_DO_OP_PIPE_CONTROL (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_DO_OP_PIPE_CONTROL) */
+#define OKL4_ASM_WIDTH_DO_OP_PIPE_CONTROL (1)
+/*lint -esym(621, OKL4_ASM_MASK_OPERATION_PIPE_CONTROL) */
+#define OKL4_ASM_MASK_OPERATION_PIPE_CONTROL (7 << 1)
+/*lint -esym(621, OKL4_ASM_SHIFT_OPERATION_PIPE_CONTROL) */
+#define OKL4_ASM_SHIFT_OPERATION_PIPE_CONTROL (1)
+/*lint -esym(621, OKL4_ASM_WIDTH_OPERATION_PIPE_CONTROL) */
+#define OKL4_ASM_WIDTH_OPERATION_PIPE_CONTROL (3)
+
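+/*
+ * Minimal illustrative sketch (assumption, not from the generated source):
+ * a pipe-control word requesting OKL4_PIPE_CONTROL_OP_SET_TX_READY could be
+ * assembled by setting the do_op bit and placing the operation in its field,
+ * using the macros above:
+ *
+ *   ctrl = OKL4_ASM_MASK_DO_OP_PIPE_CONTROL |
+ *          (OKL4_PIPE_CONTROL_OP_SET_TX_READY
+ *               << OKL4_ASM_SHIFT_OPERATION_PIPE_CONTROL);
+ */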
+
+/**
+ *  okl4_pipe_state_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_RESET_PIPE_STATE) */
+#define OKL4_ASM_MASK_RESET_PIPE_STATE (1)
+/*lint -esym(621, OKL4_ASM_SHIFT_RESET_PIPE_STATE) */
+#define OKL4_ASM_SHIFT_RESET_PIPE_STATE (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_RESET_PIPE_STATE) */
+#define OKL4_ASM_WIDTH_RESET_PIPE_STATE (1)
+/*lint -esym(621, OKL4_ASM_MASK_HALTED_PIPE_STATE) */
+#define OKL4_ASM_MASK_HALTED_PIPE_STATE (1 << 1)
+/*lint -esym(621, OKL4_ASM_SHIFT_HALTED_PIPE_STATE) */
+#define OKL4_ASM_SHIFT_HALTED_PIPE_STATE (1)
+/*lint -esym(621, OKL4_ASM_WIDTH_HALTED_PIPE_STATE) */
+#define OKL4_ASM_WIDTH_HALTED_PIPE_STATE (1)
+/*lint -esym(621, OKL4_ASM_MASK_RX_READY_PIPE_STATE) */
+#define OKL4_ASM_MASK_RX_READY_PIPE_STATE (1 << 2)
+/*lint -esym(621, OKL4_ASM_SHIFT_RX_READY_PIPE_STATE) */
+#define OKL4_ASM_SHIFT_RX_READY_PIPE_STATE (2)
+/*lint -esym(621, OKL4_ASM_WIDTH_RX_READY_PIPE_STATE) */
+#define OKL4_ASM_WIDTH_RX_READY_PIPE_STATE (1)
+/*lint -esym(621, OKL4_ASM_MASK_TX_READY_PIPE_STATE) */
+#define OKL4_ASM_MASK_TX_READY_PIPE_STATE (1 << 3)
+/*lint -esym(621, OKL4_ASM_SHIFT_TX_READY_PIPE_STATE) */
+#define OKL4_ASM_SHIFT_TX_READY_PIPE_STATE (3)
+/*lint -esym(621, OKL4_ASM_WIDTH_TX_READY_PIPE_STATE) */
+#define OKL4_ASM_WIDTH_TX_READY_PIPE_STATE (1)
+/*lint -esym(621, OKL4_ASM_MASK_RX_AVAILABLE_PIPE_STATE) */
+#define OKL4_ASM_MASK_RX_AVAILABLE_PIPE_STATE (1 << 4)
+/*lint -esym(621, OKL4_ASM_SHIFT_RX_AVAILABLE_PIPE_STATE) */
+#define OKL4_ASM_SHIFT_RX_AVAILABLE_PIPE_STATE (4)
+/*lint -esym(621, OKL4_ASM_WIDTH_RX_AVAILABLE_PIPE_STATE) */
+#define OKL4_ASM_WIDTH_RX_AVAILABLE_PIPE_STATE (1)
+/*lint -esym(621, OKL4_ASM_MASK_TX_AVAILABLE_PIPE_STATE) */
+#define OKL4_ASM_MASK_TX_AVAILABLE_PIPE_STATE (1 << 5)
+/*lint -esym(621, OKL4_ASM_SHIFT_TX_AVAILABLE_PIPE_STATE) */
+#define OKL4_ASM_SHIFT_TX_AVAILABLE_PIPE_STATE (5)
+/*lint -esym(621, OKL4_ASM_WIDTH_TX_AVAILABLE_PIPE_STATE) */
+#define OKL4_ASM_WIDTH_TX_AVAILABLE_PIPE_STATE (1)
+/*lint -esym(621, OKL4_ASM_MASK_WAITING_PIPE_STATE) */
+#define OKL4_ASM_MASK_WAITING_PIPE_STATE (1 << 6)
+/*lint -esym(621, OKL4_ASM_SHIFT_WAITING_PIPE_STATE) */
+#define OKL4_ASM_SHIFT_WAITING_PIPE_STATE (6)
+/*lint -esym(621, OKL4_ASM_WIDTH_WAITING_PIPE_STATE) */
+#define OKL4_ASM_WIDTH_WAITING_PIPE_STATE (1)
+/*lint -esym(621, OKL4_ASM_MASK_OVERQUOTA_PIPE_STATE) */
+#define OKL4_ASM_MASK_OVERQUOTA_PIPE_STATE (1 << 7)
+/*lint -esym(621, OKL4_ASM_SHIFT_OVERQUOTA_PIPE_STATE) */
+#define OKL4_ASM_SHIFT_OVERQUOTA_PIPE_STATE (7)
+/*lint -esym(621, OKL4_ASM_WIDTH_OVERQUOTA_PIPE_STATE) */
+#define OKL4_ASM_WIDTH_OVERQUOTA_PIPE_STATE (1)
+
+
+/**
+ *  okl4_power_state_t
+ **/
+/*lint -esym(621, OKL4_POWER_STATE_IDLE) */
+#define OKL4_POWER_STATE_IDLE (0)
+
+/*lint -esym(621, OKL4_POWER_STATE_PLATFORM_BASE) */
+#define OKL4_POWER_STATE_PLATFORM_BASE (256)
+
+/*lint -esym(621, OKL4_POWER_STATE_POWEROFF) */
+#define OKL4_POWER_STATE_POWEROFF (1)
+
+/**
+ *  okl4_register_set_t
+ **/
+/*lint -esym(621, OKL4_ASM_REGISTER_SET_CPU_REGS) */
+#define OKL4_ASM_REGISTER_SET_CPU_REGS (0x0)
+/*lint -esym(621, OKL4_ASM_REGISTER_SET_VFP_REGS) */
+#define OKL4_ASM_REGISTER_SET_VFP_REGS (0x1)
+/*lint -esym(621, OKL4_ASM_REGISTER_SET_VFP_CTRL_REGS) */
+#define OKL4_ASM_REGISTER_SET_VFP_CTRL_REGS (0x2)
+/*lint -esym(621, OKL4_ASM_REGISTER_SET_VFP64_REGS) */
+#define OKL4_ASM_REGISTER_SET_VFP64_REGS (0x3)
+/*lint -esym(621, OKL4_ASM_REGISTER_SET_VFP128_REGS) */
+#define OKL4_ASM_REGISTER_SET_VFP128_REGS (0x4)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_REGISTER_SET_MAX) */
+#define OKL4_ASM_REGISTER_SET_MAX (0x4)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_REGISTER_SET_INVALID) */
+#define OKL4_ASM_REGISTER_SET_INVALID (0xffffffff)
+
+/**
+ *  okl4_register_and_set_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_OFFSET_REGISTER_AND_SET) */
+#define OKL4_ASM_MASK_OFFSET_REGISTER_AND_SET (65535)
+/*lint -esym(621, OKL4_ASM_SHIFT_OFFSET_REGISTER_AND_SET) */
+#define OKL4_ASM_SHIFT_OFFSET_REGISTER_AND_SET (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_OFFSET_REGISTER_AND_SET) */
+#define OKL4_ASM_WIDTH_OFFSET_REGISTER_AND_SET (16)
+/*lint -esym(621, OKL4_ASM_MASK_SET_REGISTER_AND_SET) */
+#define OKL4_ASM_MASK_SET_REGISTER_AND_SET (65535 << 16)
+/*lint -esym(621, OKL4_ASM_SHIFT_SET_REGISTER_AND_SET) */
+#define OKL4_ASM_SHIFT_SET_REGISTER_AND_SET (16)
+/*lint -esym(621, OKL4_ASM_WIDTH_SET_REGISTER_AND_SET) */
+#define OKL4_ASM_WIDTH_SET_REGISTER_AND_SET (16)
+
+
+/**
+ *  okl4_scheduler_virq_flags_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS) */
+#define OKL4_ASM_MASK_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_SHIFT_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS) */
+#define OKL4_ASM_SHIFT_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS) */
+#define OKL4_ASM_WIDTH_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS (1)
+
+
+/**
+ *  okl4_sdk_version_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_MAINTENANCE_SDK_VERSION) */
+#define OKL4_ASM_MASK_MAINTENANCE_SDK_VERSION (63)
+/*lint -esym(621, OKL4_ASM_SHIFT_MAINTENANCE_SDK_VERSION) */
+#define OKL4_ASM_SHIFT_MAINTENANCE_SDK_VERSION (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_MAINTENANCE_SDK_VERSION) */
+#define OKL4_ASM_WIDTH_MAINTENANCE_SDK_VERSION (6)
+/*lint -esym(621, OKL4_ASM_MASK_RELEASE_SDK_VERSION) */
+#define OKL4_ASM_MASK_RELEASE_SDK_VERSION (255 << 8)
+/*lint -esym(621, OKL4_ASM_SHIFT_RELEASE_SDK_VERSION) */
+#define OKL4_ASM_SHIFT_RELEASE_SDK_VERSION (8)
+/*lint -esym(621, OKL4_ASM_WIDTH_RELEASE_SDK_VERSION) */
+#define OKL4_ASM_WIDTH_RELEASE_SDK_VERSION (8)
+/*lint -esym(621, OKL4_ASM_MASK_MINOR_SDK_VERSION) */
+#define OKL4_ASM_MASK_MINOR_SDK_VERSION (63 << 16)
+/*lint -esym(621, OKL4_ASM_SHIFT_MINOR_SDK_VERSION) */
+#define OKL4_ASM_SHIFT_MINOR_SDK_VERSION (16)
+/*lint -esym(621, OKL4_ASM_WIDTH_MINOR_SDK_VERSION) */
+#define OKL4_ASM_WIDTH_MINOR_SDK_VERSION (6)
+/*lint -esym(621, OKL4_ASM_MASK_MAJOR_SDK_VERSION) */
+#define OKL4_ASM_MASK_MAJOR_SDK_VERSION (15 << 24)
+/*lint -esym(621, OKL4_ASM_SHIFT_MAJOR_SDK_VERSION) */
+#define OKL4_ASM_SHIFT_MAJOR_SDK_VERSION (24)
+/*lint -esym(621, OKL4_ASM_WIDTH_MAJOR_SDK_VERSION) */
+#define OKL4_ASM_WIDTH_MAJOR_SDK_VERSION (4)
+/*lint -esym(621, OKL4_ASM_MASK_RES0_FLAG_SDK_VERSION) */
+#define OKL4_ASM_MASK_RES0_FLAG_SDK_VERSION (1 << 28)
+/*lint -esym(621, OKL4_ASM_SHIFT_RES0_FLAG_SDK_VERSION) */
+#define OKL4_ASM_SHIFT_RES0_FLAG_SDK_VERSION (28)
+/*lint -esym(621, OKL4_ASM_WIDTH_RES0_FLAG_SDK_VERSION) */
+#define OKL4_ASM_WIDTH_RES0_FLAG_SDK_VERSION (1)
+/*lint -esym(621, OKL4_ASM_MASK_DEV_FLAG_SDK_VERSION) */
+#define OKL4_ASM_MASK_DEV_FLAG_SDK_VERSION (1 << 30)
+/*lint -esym(621, OKL4_ASM_SHIFT_DEV_FLAG_SDK_VERSION) */
+#define OKL4_ASM_SHIFT_DEV_FLAG_SDK_VERSION (30)
+/*lint -esym(621, OKL4_ASM_WIDTH_DEV_FLAG_SDK_VERSION) */
+#define OKL4_ASM_WIDTH_DEV_FLAG_SDK_VERSION (1)
+/*lint -esym(621, OKL4_ASM_MASK_FORMAT_FLAG_SDK_VERSION) */
+#define OKL4_ASM_MASK_FORMAT_FLAG_SDK_VERSION (1 << 31)
+/*lint -esym(621, OKL4_ASM_SHIFT_FORMAT_FLAG_SDK_VERSION) */
+#define OKL4_ASM_SHIFT_FORMAT_FLAG_SDK_VERSION (31)
+/*lint -esym(621, OKL4_ASM_WIDTH_FORMAT_FLAG_SDK_VERSION) */
+#define OKL4_ASM_WIDTH_FORMAT_FLAG_SDK_VERSION (1)
+
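+/*
+ * Minimal illustrative sketch (assumption, not from the generated source):
+ * decoding the version components from an okl4_sdk_version_t word with the
+ * macros above:
+ *
+ *   major       = (v & OKL4_ASM_MASK_MAJOR_SDK_VERSION)
+ *                     >> OKL4_ASM_SHIFT_MAJOR_SDK_VERSION;
+ *   minor       = (v & OKL4_ASM_MASK_MINOR_SDK_VERSION)
+ *                     >> OKL4_ASM_SHIFT_MINOR_SDK_VERSION;
+ *   release     = (v & OKL4_ASM_MASK_RELEASE_SDK_VERSION)
+ *                     >> OKL4_ASM_SHIFT_RELEASE_SDK_VERSION;
+ *   maintenance = (v & OKL4_ASM_MASK_MAINTENANCE_SDK_VERSION)
+ *                     >> OKL4_ASM_SHIFT_MAINTENANCE_SDK_VERSION;
+ */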
+
+/**
+ *  okl4_timer_flags_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_ACTIVE_TIMER_FLAGS) */
+#define OKL4_ASM_MASK_ACTIVE_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_SHIFT_ACTIVE_TIMER_FLAGS) */
+#define OKL4_ASM_SHIFT_ACTIVE_TIMER_FLAGS (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_ACTIVE_TIMER_FLAGS) */
+#define OKL4_ASM_WIDTH_ACTIVE_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_MASK_PERIODIC_TIMER_FLAGS) */
+#define OKL4_ASM_MASK_PERIODIC_TIMER_FLAGS (1 << 1)
+/*lint -esym(621, OKL4_ASM_SHIFT_PERIODIC_TIMER_FLAGS) */
+#define OKL4_ASM_SHIFT_PERIODIC_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_WIDTH_PERIODIC_TIMER_FLAGS) */
+#define OKL4_ASM_WIDTH_PERIODIC_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_MASK_ABSOLUTE_TIMER_FLAGS) */
+#define OKL4_ASM_MASK_ABSOLUTE_TIMER_FLAGS (1 << 2)
+/*lint -esym(621, OKL4_ASM_SHIFT_ABSOLUTE_TIMER_FLAGS) */
+#define OKL4_ASM_SHIFT_ABSOLUTE_TIMER_FLAGS (2)
+/*lint -esym(621, OKL4_ASM_WIDTH_ABSOLUTE_TIMER_FLAGS) */
+#define OKL4_ASM_WIDTH_ABSOLUTE_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_MASK_UNITS_TIMER_FLAGS) */
+#define OKL4_ASM_MASK_UNITS_TIMER_FLAGS (1 << 3)
+/*lint -esym(621, OKL4_ASM_SHIFT_UNITS_TIMER_FLAGS) */
+#define OKL4_ASM_SHIFT_UNITS_TIMER_FLAGS (3)
+/*lint -esym(621, OKL4_ASM_WIDTH_UNITS_TIMER_FLAGS) */
+#define OKL4_ASM_WIDTH_UNITS_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_MASK_ALIGN_TIMER_FLAGS) */
+#define OKL4_ASM_MASK_ALIGN_TIMER_FLAGS (1 << 4)
+/*lint -esym(621, OKL4_ASM_SHIFT_ALIGN_TIMER_FLAGS) */
+#define OKL4_ASM_SHIFT_ALIGN_TIMER_FLAGS (4)
+/*lint -esym(621, OKL4_ASM_WIDTH_ALIGN_TIMER_FLAGS) */
+#define OKL4_ASM_WIDTH_ALIGN_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_MASK_WATCHDOG_TIMER_FLAGS) */
+#define OKL4_ASM_MASK_WATCHDOG_TIMER_FLAGS (1 << 5)
+/*lint -esym(621, OKL4_ASM_SHIFT_WATCHDOG_TIMER_FLAGS) */
+#define OKL4_ASM_SHIFT_WATCHDOG_TIMER_FLAGS (5)
+/*lint -esym(621, OKL4_ASM_WIDTH_WATCHDOG_TIMER_FLAGS) */
+#define OKL4_ASM_WIDTH_WATCHDOG_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_MASK_RELOAD_TIMER_FLAGS) */
+#define OKL4_ASM_MASK_RELOAD_TIMER_FLAGS (1 << 30)
+/*lint -esym(621, OKL4_ASM_SHIFT_RELOAD_TIMER_FLAGS) */
+#define OKL4_ASM_SHIFT_RELOAD_TIMER_FLAGS (30)
+/*lint -esym(621, OKL4_ASM_WIDTH_RELOAD_TIMER_FLAGS) */
+#define OKL4_ASM_WIDTH_RELOAD_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_MASK_TIMESLICE_TIMER_FLAGS) */
+#define OKL4_ASM_MASK_TIMESLICE_TIMER_FLAGS (1 << 31)
+/*lint -esym(621, OKL4_ASM_SHIFT_TIMESLICE_TIMER_FLAGS) */
+#define OKL4_ASM_SHIFT_TIMESLICE_TIMER_FLAGS (31)
+/*lint -esym(621, OKL4_ASM_WIDTH_TIMESLICE_TIMER_FLAGS) */
+#define OKL4_ASM_WIDTH_TIMESLICE_TIMER_FLAGS (1)
+
+
+/**
+ *  okl4_tracepoint_class_t
+ **/
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_CLASS_THREAD_STATE) */
+#define OKL4_ASM_TRACEPOINT_CLASS_THREAD_STATE (0x0)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_CLASS_SYSCALLS) */
+#define OKL4_ASM_TRACEPOINT_CLASS_SYSCALLS (0x1)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_CLASS_PRIMARY) */
+#define OKL4_ASM_TRACEPOINT_CLASS_PRIMARY (0x2)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_CLASS_SECONDARY) */
+#define OKL4_ASM_TRACEPOINT_CLASS_SECONDARY (0x3)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_CLASS_TERTIARY) */
+#define OKL4_ASM_TRACEPOINT_CLASS_TERTIARY (0x4)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_CLASS_MAX) */
+#define OKL4_ASM_TRACEPOINT_CLASS_MAX (0x4)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_CLASS_INVALID) */
+#define OKL4_ASM_TRACEPOINT_CLASS_INVALID (0xffffffff)
+
+/**
+ *  _okl4_tracepoint_desc_t
+ **/
+
+
+/*lint -esym(621, _OKL4_ASM_MASK_ID_TRACEPOINT_DESC) */
+#define _OKL4_ASM_MASK_ID_TRACEPOINT_DESC (255)
+/*lint -esym(621, _OKL4_ASM_SHIFT_ID_TRACEPOINT_DESC) */
+#define _OKL4_ASM_SHIFT_ID_TRACEPOINT_DESC (0)
+/*lint -esym(621, _OKL4_ASM_WIDTH_ID_TRACEPOINT_DESC) */
+#define _OKL4_ASM_WIDTH_ID_TRACEPOINT_DESC (8)
+/*lint -esym(621, _OKL4_ASM_MASK_USER_TRACEPOINT_DESC) */
+#define _OKL4_ASM_MASK_USER_TRACEPOINT_DESC (1 << 8)
+/*lint -esym(621, _OKL4_ASM_SHIFT_USER_TRACEPOINT_DESC) */
+#define _OKL4_ASM_SHIFT_USER_TRACEPOINT_DESC (8)
+/*lint -esym(621, _OKL4_ASM_WIDTH_USER_TRACEPOINT_DESC) */
+#define _OKL4_ASM_WIDTH_USER_TRACEPOINT_DESC (1)
+/*lint -esym(621, _OKL4_ASM_MASK_BIN_TRACEPOINT_DESC) */
+#define _OKL4_ASM_MASK_BIN_TRACEPOINT_DESC (1 << 9)
+/*lint -esym(621, _OKL4_ASM_SHIFT_BIN_TRACEPOINT_DESC) */
+#define _OKL4_ASM_SHIFT_BIN_TRACEPOINT_DESC (9)
+/*lint -esym(621, _OKL4_ASM_WIDTH_BIN_TRACEPOINT_DESC) */
+#define _OKL4_ASM_WIDTH_BIN_TRACEPOINT_DESC (1)
+/*lint -esym(621, _OKL4_ASM_MASK_RECLEN_TRACEPOINT_DESC) */
+#define _OKL4_ASM_MASK_RECLEN_TRACEPOINT_DESC (63 << 10)
+/*lint -esym(621, _OKL4_ASM_SHIFT_RECLEN_TRACEPOINT_DESC) */
+#define _OKL4_ASM_SHIFT_RECLEN_TRACEPOINT_DESC (10)
+/*lint -esym(621, _OKL4_ASM_WIDTH_RECLEN_TRACEPOINT_DESC) */
+#define _OKL4_ASM_WIDTH_RECLEN_TRACEPOINT_DESC (6)
+/*lint -esym(621, _OKL4_ASM_MASK_CPUID_TRACEPOINT_DESC) */
+#define _OKL4_ASM_MASK_CPUID_TRACEPOINT_DESC (63 << 16)
+/*lint -esym(621, _OKL4_ASM_SHIFT_CPUID_TRACEPOINT_DESC) */
+#define _OKL4_ASM_SHIFT_CPUID_TRACEPOINT_DESC (16)
+/*lint -esym(621, _OKL4_ASM_WIDTH_CPUID_TRACEPOINT_DESC) */
+#define _OKL4_ASM_WIDTH_CPUID_TRACEPOINT_DESC (6)
+/*lint -esym(621, _OKL4_ASM_MASK_THREADID_TRACEPOINT_DESC) */
+#define _OKL4_ASM_MASK_THREADID_TRACEPOINT_DESC (63 << 22)
+/*lint -esym(621, _OKL4_ASM_SHIFT_THREADID_TRACEPOINT_DESC) */
+#define _OKL4_ASM_SHIFT_THREADID_TRACEPOINT_DESC (22)
+/*lint -esym(621, _OKL4_ASM_WIDTH_THREADID_TRACEPOINT_DESC) */
+#define _OKL4_ASM_WIDTH_THREADID_TRACEPOINT_DESC (6)
+/*lint -esym(621, _OKL4_ASM_MASK__R1_TRACEPOINT_DESC) */
+#define _OKL4_ASM_MASK__R1_TRACEPOINT_DESC (15 << 28)
+/*lint -esym(621, _OKL4_ASM_SHIFT__R1_TRACEPOINT_DESC) */
+#define _OKL4_ASM_SHIFT__R1_TRACEPOINT_DESC (28)
+/*lint -esym(621, _OKL4_ASM_WIDTH__R1_TRACEPOINT_DESC) */
+#define _OKL4_ASM_WIDTH__R1_TRACEPOINT_DESC (4)
+
+
+/**
+ *  _okl4_tracepoint_masks_t
+ **/
+
+
+/*lint -esym(621, _OKL4_ASM_MASK_CLASS_TRACEPOINT_MASKS) */
+#define _OKL4_ASM_MASK_CLASS_TRACEPOINT_MASKS (65535)
+/*lint -esym(621, _OKL4_ASM_SHIFT_CLASS_TRACEPOINT_MASKS) */
+#define _OKL4_ASM_SHIFT_CLASS_TRACEPOINT_MASKS (0)
+/*lint -esym(621, _OKL4_ASM_WIDTH_CLASS_TRACEPOINT_MASKS) */
+#define _OKL4_ASM_WIDTH_CLASS_TRACEPOINT_MASKS (16)
+/*lint -esym(621, _OKL4_ASM_MASK_SUBSYSTEM_TRACEPOINT_MASKS) */
+#define _OKL4_ASM_MASK_SUBSYSTEM_TRACEPOINT_MASKS (65535 << 16)
+/*lint -esym(621, _OKL4_ASM_SHIFT_SUBSYSTEM_TRACEPOINT_MASKS) */
+#define _OKL4_ASM_SHIFT_SUBSYSTEM_TRACEPOINT_MASKS (16)
+/*lint -esym(621, _OKL4_ASM_WIDTH_SUBSYSTEM_TRACEPOINT_MASKS) */
+#define _OKL4_ASM_WIDTH_SUBSYSTEM_TRACEPOINT_MASKS (16)
+
+
+/**
+ *  okl4_tracepoint_evt_t
+ **/
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SCH_SCHEDULER_FLAG_SET_RUNNABLE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SCH_SCHEDULER_FLAG_SET_RUNNABLE (0x0)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SCH_SCHEDULER_FLAG_CLEAR_RUNNABLE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SCH_SCHEDULER_FLAG_CLEAR_RUNNABLE (0x1)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SCH_CONTEXT_SWITCH) */
+#define OKL4_ASM_TRACEPOINT_EVT_SCH_CONTEXT_SWITCH (0x2)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_KDB_SET_OBJECT_NAME) */
+#define OKL4_ASM_TRACEPOINT_EVT_KDB_SET_OBJECT_NAME (0x3)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_PROCESS_RECV) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_PROCESS_RECV (0x4)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_HALTED) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_HALTED (0x5)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_RECV_AREA) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_RECV_AREA (0x6)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_RECV_QUEUE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_RECV_QUEUE (0x7)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_RECV_SEGMENT) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_RECV_SEGMENT (0x8)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_SEND_AREA) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_SEND_AREA (0x9)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_SEND_QUEUE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_SEND_QUEUE (0xa)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_SEND_SEGMENT) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_SEND_SEGMENT (0xb)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_TRIGGER_SEND) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_TRIGGER_SEND (0xc)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_ACK) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_ACK (0xd)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_PRIVATE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_PRIVATE (0xe)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_SHARED) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_SHARED (0xf)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_DETACH) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_DETACH (0x10)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_DIST_ENABLE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_DIST_ENABLE (0x11)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_EOI) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_EOI (0x12)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_GET_HIGHEST_PRIORITY_PENDING) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_GET_HIGHEST_PRIORITY_PENDING (0x13)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_GET_PAYLOAD) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_GET_PAYLOAD (0x14)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_LIMITS) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_LIMITS (0x15)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_MASK) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_MASK (0x16)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_RAISE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_RAISE (0x17)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_BINARY_POINT) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_BINARY_POINT (0x18)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONFIG) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONFIG (0x19)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONTROL) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONTROL (0x1a)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY (0x1b)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY_MASK) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY_MASK (0x1c)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_TARGETS) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_TARGETS (0x1d)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_UNMASK) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_UNMASK (0x1e)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_KDB_INTERACT) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_KDB_INTERACT (0x1f)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_KDB_SET_OBJECT_NAME) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_KDB_SET_OBJECT_NAME (0x20)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_KSP_PROCEDURE_CALL) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_KSP_PROCEDURE_CALL (0x21)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_ATTACH_SEGMENT) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_ATTACH_SEGMENT (0x22)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_DETACH_SEGMENT) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_DETACH_SEGMENT (0x23)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE (0x24)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE_PN) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE_PN (0x25)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PAGE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PAGE (0x26)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PN) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PN (0x27)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_MAP_PAGE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_MAP_PAGE (0x28)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_MAP_PN) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_MAP_PN (0x29)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UNMAP_PAGE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UNMAP_PAGE (0x2a)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UNMAP_PN) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UNMAP_PN (0x2b)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_ATTRS) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_ATTRS (0x2c)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_PERMS) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_PERMS (0x2d)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_ATTRS) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_ATTRS (0x2e)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_PERMS) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_PERMS (0x2f)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_PERFORMANCE_NULL_SYSCALL) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_PERFORMANCE_NULL_SYSCALL (0x30)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_PIPE_CONTROL) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_PIPE_CONTROL (0x31)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_PIPE_RECV) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_PIPE_RECV (0x32)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_PIPE_SEND) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_PIPE_SEND (0x33)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_PRIORITY_WAIVE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_PRIORITY_WAIVE (0x34)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTER) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTER (0x35)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTERS) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTERS (0x36)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_READ_MEMORY32) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_READ_MEMORY32 (0x37)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTER) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTER (0x38)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTERS) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTERS (0x39)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_WRITE_MEMORY32) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_WRITE_MEMORY32 (0x3a)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_STATUS_SUSPENDED) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_STATUS_SUSPENDED (0x3b)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_WATCH_SUSPENDED) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_WATCH_SUSPENDED (0x3c)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_DISABLE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_DISABLE (0x3d)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_ENABLE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_ENABLE (0x3e)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_GET_DATA) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_GET_DATA (0x3f)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_DISABLE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_DISABLE (0x40)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_ENABLE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_ENABLE (0x41)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_GET_DATA) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_GET_DATA (0x42)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULER_SUSPEND) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULER_SUSPEND (0x43)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_CANCEL) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_CANCEL (0x44)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_GET_RESOLUTION) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_GET_RESOLUTION (0x45)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_GET_TIME) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_GET_TIME (0x46)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_QUERY) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_QUERY (0x47)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_START) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_START (0x48)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_TRACEBUFFER_SYNC) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_TRACEBUFFER_SYNC (0x49)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_RESET) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_RESET (0x4a)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_START) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_START (0x4b)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_STOP) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_STOP (0x4c)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_SWITCH_MODE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_SWITCH_MODE (0x4d)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_SYNC_SEV) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_SYNC_SEV (0x4e)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_SYNC_WFE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_SYNC_WFE (0x4f)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_VINTERRUPT_CLEAR_AND_RAISE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_VINTERRUPT_CLEAR_AND_RAISE (0x50)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_VINTERRUPT_MODIFY) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_VINTERRUPT_MODIFY (0x51)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_VINTERRUPT_RAISE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_VINTERRUPT_RAISE (0x52)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_MAX) */
+#define OKL4_ASM_TRACEPOINT_EVT_MAX (0x52)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_INVALID) */
+#define OKL4_ASM_TRACEPOINT_EVT_INVALID (0xffffffff)
+
+/**
+ *  okl4_tracepoint_level_t
+ **/
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_LEVEL_DEBUG) */
+#define OKL4_ASM_TRACEPOINT_LEVEL_DEBUG (0x0)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_LEVEL_INFO) */
+#define OKL4_ASM_TRACEPOINT_LEVEL_INFO (0x1)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_LEVEL_WARN) */
+#define OKL4_ASM_TRACEPOINT_LEVEL_WARN (0x2)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_LEVEL_CRITICAL) */
+#define OKL4_ASM_TRACEPOINT_LEVEL_CRITICAL (0x3)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_LEVEL_MAX) */
+#define OKL4_ASM_TRACEPOINT_LEVEL_MAX (0x3)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_LEVEL_INVALID) */
+#define OKL4_ASM_TRACEPOINT_LEVEL_INVALID (0xffffffff)
+
+/**
+ *  okl4_tracepoint_subsystem_t
+ **/
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_SUBSYSTEM_SCHEDULER) */
+#define OKL4_ASM_TRACEPOINT_SUBSYSTEM_SCHEDULER (0x0)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_SUBSYSTEM_TRACE) */
+#define OKL4_ASM_TRACEPOINT_SUBSYSTEM_TRACE (0x1)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_SUBSYSTEM_CORE) */
+#define OKL4_ASM_TRACEPOINT_SUBSYSTEM_CORE (0x2)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_SUBSYSTEM_MAX) */
+#define OKL4_ASM_TRACEPOINT_SUBSYSTEM_MAX (0x2)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_SUBSYSTEM_INVALID) */
+#define OKL4_ASM_TRACEPOINT_SUBSYSTEM_INVALID (0xffffffff)
+
+/**
+ *  okl4_vfp_ops_t
+ **/
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_VFP_OPS_MAX) */
+#define OKL4_ASM_VFP_OPS_MAX (0x0)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_VFP_OPS_INVALID) */
+#define OKL4_ASM_VFP_OPS_INVALID (0xffffffff)
+
+/**
+ *  okl4_vservices_transport_type_t
+ **/
+/*lint -esym(621, OKL4_ASM_VSERVICES_TRANSPORT_TYPE_AXON) */
+#define OKL4_ASM_VSERVICES_TRANSPORT_TYPE_AXON (0x0)
+/*lint -esym(621, OKL4_ASM_VSERVICES_TRANSPORT_TYPE_SHARED_BUFFER) */
+#define OKL4_ASM_VSERVICES_TRANSPORT_TYPE_SHARED_BUFFER (0x1)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_VSERVICES_TRANSPORT_TYPE_MAX) */
+#define OKL4_ASM_VSERVICES_TRANSPORT_TYPE_MAX (0x1)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_VSERVICES_TRANSPORT_TYPE_INVALID) */
+#define OKL4_ASM_VSERVICES_TRANSPORT_TYPE_INVALID (0xffffffff)
+
+
+#endif /* !ASSEMBLY */
+
+#endif /* __AUTO__MICROVISOR_TYPES_H__ */
+/** @} */
+/** @} */
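
The MASK/SHIFT/WIDTH triples above describe how the packed 32-bit tracepoint descriptor is laid out (id, user, bin, reclen, cpuid, threadid, plus a reserved field). As an illustration only, here is a hedged C sketch of pulling individual fields out of a descriptor word with these constants; the descriptor value and the EXAMPLE_FIELD_GET helper are hypothetical and not part of this patch:

/* Hypothetical helper; not part of the generated header. */
#define EXAMPLE_FIELD_GET(word, field) \
	(((word) & _OKL4_ASM_MASK_##field) >> _OKL4_ASM_SHIFT_##field)

static void example_decode_desc(uint32_t desc)
{
	/* reclen occupies bits 10..15, cpuid bits 16..21, threadid bits 22..27 */
	uint32_t id     = EXAMPLE_FIELD_GET(desc, ID_TRACEPOINT_DESC);
	uint32_t reclen = EXAMPLE_FIELD_GET(desc, RECLEN_TRACEPOINT_DESC);
	uint32_t cpuid  = EXAMPLE_FIELD_GET(desc, CPUID_TRACEPOINT_DESC);
	uint32_t thread = EXAMPLE_FIELD_GET(desc, THREADID_TRACEPOINT_DESC);

	pr_debug("tracepoint id=%u reclen=%u cpu=%u thread=%u\n",
		 id, reclen, cpuid, thread);
}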
diff --git a/include/microvisor/microvisor.h b/include/microvisor/microvisor.h
new file mode 100644
index 0000000..3bb8d64
--- /dev/null
+++ b/include/microvisor/microvisor.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _MICROVISOR_H_
+#define _MICROVISOR_H_
+
+/**
+ * @defgroup lib_microvisor The Microvisor Library
+ *
+ * @{
+ *
+ * The Microvisor Library is the primary low-level API between the OKL4
+ * Microvisor and a Cell application or guest-OS. It also provides certain
+ * common data types such as structure definitions used in these interactions.
+ *
+ */
+
+/**
+ * Temporarily define _Bool to allow C++ compilation of
+ * OKL code that makes use of it.
+ */
+#if defined(__cplusplus) && !defined(_Bool)
+#define _OKL4_CPP_BOOL
+#define _Bool bool
+#endif
+
+#define OKL4_INLINE static inline
+
+#if defined(_lint) || defined(_splint)
+#define OKL4_FORCE_INLINE static
+#else
+#define OKL4_FORCE_INLINE static inline __attribute__((always_inline))
+#endif
+
+#include <microvisor/kernel/types.h>
+#include <microvisor/kernel/microvisor.h>
+#include <microvisor/kernel/syscalls.h>
+#include <microvisor/kernel/offsets.h>
+
+/** @} */
+
+/**
+ * Remove temporary definition of _Bool if it was defined
+ */
+#if defined(_OKL4_CPP_BOOL)
+#undef _Bool
+#undef _OKL4_CPP_BOOL
+#endif
+
+#endif /* _MICROVISOR_H_ */
diff --git a/include/net/bonding.h b/include/net/bonding.h
index 714428c..8750c2c 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -139,12 +139,6 @@
 	int mode;
 };
 
-struct netdev_notify_work {
-	struct delayed_work	work;
-	struct net_device	*dev;
-	struct netdev_bonding_info bonding_info;
-};
-
 struct slave {
 	struct net_device *dev; /* first - useful for panic debug */
 	struct bonding *bond; /* our master */
@@ -171,6 +165,7 @@
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	struct netpoll *np;
 #endif
+	struct delayed_work notify_work;
 	struct kobject kobj;
 	struct rtnl_link_stats64 slave_stats;
 };
diff --git a/include/net/cnss2.h b/include/net/cnss2.h
index 4e86755..8a1772e 100644
--- a/include/net/cnss2.h
+++ b/include/net/cnss2.h
@@ -161,6 +161,7 @@
 extern int cnss_self_recovery(struct device *dev,
 			      enum cnss_recovery_reason reason);
 extern int cnss_force_fw_assert(struct device *dev);
+extern int cnss_force_collect_rddm(struct device *dev);
 extern void *cnss_get_virt_ramdump_mem(struct device *dev, unsigned long *size);
 extern int cnss_get_fw_files_for_target(struct device *dev,
 					struct cnss_fw_files *pfw_files,
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 634d192..a3812e9 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -1,14 +1,20 @@
 #ifndef __NET_FRAG_H__
 #define __NET_FRAG_H__
 
+#include <linux/rhashtable.h>
+
 struct netns_frags {
-	/* Keep atomic mem on separate cachelines in structs that include it */
-	atomic_t		mem ____cacheline_aligned_in_smp;
 	/* sysctls */
+	long			high_thresh;
+	long			low_thresh;
 	int			timeout;
-	int			high_thresh;
-	int			low_thresh;
 	int			max_dist;
+	struct inet_frags	*f;
+
+	struct rhashtable       rhashtable ____cacheline_aligned_in_smp;
+
+	/* Keep atomic mem on separate cachelines in structs that include it */
+	atomic_long_t		mem ____cacheline_aligned_in_smp;
 };
 
 /**
@@ -24,130 +30,115 @@
 	INET_FRAG_COMPLETE	= BIT(2),
 };
 
+struct frag_v4_compare_key {
+	__be32		saddr;
+	__be32		daddr;
+	u32		user;
+	u32		vif;
+	__be16		id;
+	u16		protocol;
+};
+
+struct frag_v6_compare_key {
+	struct in6_addr	saddr;
+	struct in6_addr	daddr;
+	u32		user;
+	__be32		id;
+	u32		iif;
+};
+
 /**
  * struct inet_frag_queue - fragment queue
  *
- * @lock: spinlock protecting the queue
+ * @node: rhash node
+ * @key: keys identifying this frag.
  * @timer: queue expiration timer
- * @list: hash bucket list
+ * @lock: spinlock protecting this frag
  * @refcnt: reference count of the queue
  * @fragments: received fragments head
+ * @rb_fragments: received fragments rb-tree root
  * @fragments_tail: received fragments tail
+ * @last_run_head: the head of the last "run". see ip_fragment.c
  * @stamp: timestamp of the last received fragment
  * @len: total length of the original datagram
  * @meat: length of received fragments so far
  * @flags: fragment queue flags
  * @max_size: maximum received fragment size
  * @net: namespace that this frag belongs to
- * @list_evictor: list of queues to forcefully evict (e.g. due to low memory)
+ * @rcu: rcu head for deferred freeing
  */
 struct inet_frag_queue {
-	spinlock_t		lock;
+	struct rhash_head	node;
+	union {
+		struct frag_v4_compare_key v4;
+		struct frag_v6_compare_key v6;
+	} key;
 	struct timer_list	timer;
-	struct hlist_node	list;
+	spinlock_t		lock;
 	atomic_t		refcnt;
-	struct sk_buff		*fragments;
+	struct sk_buff		*fragments;  /* Used in IPv6. */
+	struct rb_root		rb_fragments; /* Used in IPv4. */
 	struct sk_buff		*fragments_tail;
+	struct sk_buff		*last_run_head;
 	ktime_t			stamp;
 	int			len;
 	int			meat;
 	__u8			flags;
 	u16			max_size;
-	struct netns_frags	*net;
-	struct hlist_node	list_evictor;
-};
-
-#define INETFRAGS_HASHSZ	1024
-
-/* averaged:
- * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ /
- *	       rounded up (SKB_TRUELEN(0) + sizeof(struct ipq or
- *	       struct frag_queue))
- */
-#define INETFRAGS_MAXDEPTH	128
-
-struct inet_frag_bucket {
-	struct hlist_head	chain;
-	spinlock_t		chain_lock;
+	struct netns_frags      *net;
+	struct rcu_head		rcu;
 };
 
 struct inet_frags {
-	struct inet_frag_bucket	hash[INETFRAGS_HASHSZ];
-
-	struct work_struct	frags_work;
-	unsigned int next_bucket;
-	unsigned long last_rebuild_jiffies;
-	bool rebuild;
-
-	/* The first call to hashfn is responsible to initialize
-	 * rnd. This is best done with net_get_random_once.
-	 *
-	 * rnd_seqlock is used to let hash insertion detect
-	 * when it needs to re-lookup the hash chain to use.
-	 */
-	u32			rnd;
-	seqlock_t		rnd_seqlock;
 	int			qsize;
 
-	unsigned int		(*hashfn)(const struct inet_frag_queue *);
-	bool			(*match)(const struct inet_frag_queue *q,
-					 const void *arg);
 	void			(*constructor)(struct inet_frag_queue *q,
 					       const void *arg);
 	void			(*destructor)(struct inet_frag_queue *);
 	void			(*frag_expire)(unsigned long data);
 	struct kmem_cache	*frags_cachep;
 	const char		*frags_cache_name;
+	struct rhashtable_params rhash_params;
 };
 
 int inet_frags_init(struct inet_frags *);
 void inet_frags_fini(struct inet_frags *);
 
-static inline void inet_frags_init_net(struct netns_frags *nf)
+static inline int inet_frags_init_net(struct netns_frags *nf)
 {
-	atomic_set(&nf->mem, 0);
+	atomic_long_set(&nf->mem, 0);
+	return rhashtable_init(&nf->rhashtable, &nf->f->rhash_params);
 }
-void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);
+void inet_frags_exit_net(struct netns_frags *nf);
 
-void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
-void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f);
-struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
-		struct inet_frags *f, void *key, unsigned int hash);
+void inet_frag_kill(struct inet_frag_queue *q);
+void inet_frag_destroy(struct inet_frag_queue *q);
+struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key);
 
-void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
-				   const char *prefix);
+/* Free all skbs in the queue; return the sum of their truesizes. */
+unsigned int inet_frag_rbtree_purge(struct rb_root *root);
 
-static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
+static inline void inet_frag_put(struct inet_frag_queue *q)
 {
 	if (atomic_dec_and_test(&q->refcnt))
-		inet_frag_destroy(q, f);
-}
-
-static inline bool inet_frag_evicting(struct inet_frag_queue *q)
-{
-	return !hlist_unhashed(&q->list_evictor);
+		inet_frag_destroy(q);
 }
 
 /* Memory Tracking Functions. */
 
-static inline int frag_mem_limit(struct netns_frags *nf)
+static inline long frag_mem_limit(const struct netns_frags *nf)
 {
-	return atomic_read(&nf->mem);
+	return atomic_long_read(&nf->mem);
 }
 
-static inline void sub_frag_mem_limit(struct netns_frags *nf, int i)
+static inline void sub_frag_mem_limit(struct netns_frags *nf, long val)
 {
-	atomic_sub(i, &nf->mem);
+	atomic_long_sub(val, &nf->mem);
 }
 
-static inline void add_frag_mem_limit(struct netns_frags *nf, int i)
+static inline void add_frag_mem_limit(struct netns_frags *nf, long val)
 {
-	atomic_add(i, &nf->mem);
-}
-
-static inline int sum_frag_mem_limit(struct netns_frags *nf)
-{
-	return atomic_read(&nf->mem);
+	atomic_long_add(val, &nf->mem);
 }
 
 /* RFC 3168 support :
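
The reworked API above drops the struct inet_frags argument from the kill/destroy/put helpers and moves memory accounting onto an atomic_long_t. A hedged sketch of how a reassembly path would be expected to tear down a queue with the new helpers (function and variable names are placeholders, not from this patch):

static void example_teardown(struct netns_frags *nf,
			     struct inet_frag_queue *q)
{
	unsigned int freed;

	/* unhash the queue so no new fragments are added to it */
	inet_frag_kill(q);

	/* free all queued skbs and uncharge their truesize */
	freed = inet_frag_rbtree_purge(&q->rb_fragments);
	sub_frag_mem_limit(nf, freed);

	/* drop the reference; inet_frag_destroy() runs at refcount zero */
	inet_frag_put(q);
}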
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index a2f3a49..c2865f5 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -132,12 +132,6 @@
 	return sk->sk_bound_dev_if;
 }
 
-static inline struct ip_options_rcu *ireq_opt_deref(const struct inet_request_sock *ireq)
-{
-	return rcu_dereference_check(ireq->ireq_opt,
-				     atomic_read(&ireq->req.rsk_refcnt) > 0);
-}
-
 struct inet_cork {
 	unsigned int		flags;
 	__be32			addr;
diff --git a/include/net/ip.h b/include/net/ip.h
index b1b5ee0..0623529 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -551,7 +551,6 @@
 	return skb;
 }
 #endif
-int ip_frag_mem(struct net *net);
 
 /*
  *	Functions provided by ip_forward.c
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 978387d..a6446d7 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -363,6 +363,7 @@
 int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force);
 int fib_sync_down_addr(struct net_device *dev, __be32 local);
 int fib_sync_up(struct net_device *dev, unsigned int nh_flags);
+void fib_sync_mtu(struct net_device *dev, u32 orig_mtu);
 
 extern u32 fib_multipath_secret __read_mostly;
 
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 64b0e9d..7cb100d 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -330,13 +330,6 @@
 	    idev->cnf.accept_ra;
 }
 
-#if IS_ENABLED(CONFIG_IPV6)
-static inline int ip6_frag_mem(struct net *net)
-{
-	return sum_frag_mem_limit(&net->ipv6.frags);
-}
-#endif
-
 #define IPV6_FRAG_HIGH_THRESH	(4 * 1024*1024)	/* 4194304 */
 #define IPV6_FRAG_LOW_THRESH	(3 * 1024*1024)	/* 3145728 */
 #define IPV6_FRAG_TIMEOUT	(60 * HZ)	/* 60 seconds */
@@ -530,17 +523,8 @@
 	__IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
 };
 
-struct ip6_create_arg {
-	__be32 id;
-	u32 user;
-	const struct in6_addr *src;
-	const struct in6_addr *dst;
-	int iif;
-	u8 ecn;
-};
-
 void ip6_frag_init(struct inet_frag_queue *q, const void *a);
-bool ip6_frag_match(const struct inet_frag_queue *q, const void *a);
+extern const struct rhashtable_params ip6_rhash_params;
 
 /*
  *	Equivalent of ipv4 struct ip
@@ -548,19 +532,13 @@
 struct frag_queue {
 	struct inet_frag_queue	q;
 
-	__be32			id;		/* fragment id		*/
-	u32			user;
-	struct in6_addr		saddr;
-	struct in6_addr		daddr;
-
 	int			iif;
 	unsigned int		csum;
 	__u16			nhoffset;
 	u8			ecn;
 };
 
-void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
-			   struct inet_frags *frags);
+void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq);
 
 static inline bool ipv6_addr_any(const struct in6_addr *a)
 {
diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h
index 316694d..008f466 100644
--- a/include/net/nfc/hci.h
+++ b/include/net/nfc/hci.h
@@ -87,7 +87,7 @@
  * According to specification 102 622 chapter 4.4 Pipes,
  * the pipe identifier is 7 bits long.
  */
-#define NFC_HCI_MAX_PIPES		127
+#define NFC_HCI_MAX_PIPES		128
 struct nfc_hci_init_data {
 	u8 gate_count;
 	struct nfc_hci_gate gates[NFC_HCI_MAX_CUSTOM_GATES];
diff --git a/include/soc/qcom/sysmon.h b/include/soc/qcom/sysmon.h
index 2ad3a5e..cca1dcc 100644
--- a/include/soc/qcom/sysmon.h
+++ b/include/soc/qcom/sysmon.h
@@ -40,6 +40,7 @@
  */
 enum ssctl_ssr_event_enum_type {
 	SSCTL_SSR_EVENT_ENUM_TYPE_MIN_ENUM_VAL = -2147483647,
+	SSCTL_SSR_EVENT_INVALID = -1,
 	SSCTL_SSR_EVENT_BEFORE_POWERUP = 0,
 	SSCTL_SSR_EVENT_AFTER_POWERUP = 1,
 	SSCTL_SSR_EVENT_BEFORE_SHUTDOWN = 2,
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 435cee5..c2882c2 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -515,3 +515,8 @@
 header-y += msm_rotator.h
 header-y += bgcom_interface.h
 header-y += nfc/
+
+ifneq ($(VSERVICES_SUPPORT), "")
+include include/linux/Kbuild.vservices
+endif
+header-y += okl4-link-shbuf.h
diff --git a/include/uapi/linux/loop.h b/include/uapi/linux/loop.h
index c8125ec..23158db 100644
--- a/include/uapi/linux/loop.h
+++ b/include/uapi/linux/loop.h
@@ -88,6 +88,7 @@
 #define LOOP_CHANGE_FD		0x4C06
 #define LOOP_SET_CAPACITY	0x4C07
 #define LOOP_SET_DIRECT_IO	0x4C08
+#define LOOP_SET_BLOCK_SIZE	0x4C09
 
 /* /dev/loop-control interface */
 #define LOOP_CTL_ADD		0x4C80
diff --git a/include/uapi/linux/okl4-link-shbuf.h b/include/uapi/linux/okl4-link-shbuf.h
new file mode 100644
index 0000000..69561bc
--- /dev/null
+++ b/include/uapi/linux/okl4-link-shbuf.h
@@ -0,0 +1,40 @@
+/*
+ *  User-visible interface to driver for inter-cell links using the
+ *  shared-buffer transport.
+ *
+ *  Copyright (c) 2016 Cog Systems Pty Ltd.
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+#ifndef _LINUX_OKL4_LINK_SHBUF_H
+#define _LINUX_OKL4_LINK_SHBUF_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/*
+ * Ioctl that indicates a request to raise the outgoing vIRQ. This value is
+ * chosen to avoid conflict with the numbers documented in Linux 4.1's
+ * ioctl-numbers.txt. The argument is a payload to transmit to the receiver.
+ * Note that consecutive transmissions without an interleaved clear of the
+ * interrupt result in the payloads being ORed together.
+ */
+#define OKL4_LINK_SHBUF_IOCTL_IRQ_TX _IOW(0x8d, 1, __u64)
+
+/*
+ * Ioctl that indicates a request to clear any pending incoming vIRQ. The value
+ * returned through the argument to the ioctl is the payload, which is also
+ * cleared.
+ *
+ * The caller cannot distinguish between the cases of no pending interrupt and
+ * a pending interrupt with payload 0. It is expected that the caller is
+ * communicating with a cooperative sender and has polled its file descriptor
+ * to determine that there is a pending interrupt before using this ioctl.
+ */
+#define OKL4_LINK_SHBUF_IOCTL_IRQ_CLR _IOR(0x8d, 2, __u64)
+
+#endif /* _LINUX_OKL4_LINK_SHBUF_H */
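
To make the intended flow concrete, here is a hedged userspace sketch of the two ioctls: raise the outgoing vIRQ with a payload, then poll and clear the incoming one. The device node name is hypothetical, and passing the payload by pointer is an assumption based on the _IOW/_IOR encodings.

#include <stdio.h>
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/okl4-link-shbuf.h>

int main(void)
{
	__u64 tx = 0x1, rx = 0;
	struct pollfd pfd;
	int fd = open("/dev/okl4-link-shbuf0", O_RDWR); /* hypothetical node */

	if (fd < 0)
		return 1;

	/* raise the outgoing vIRQ; repeated raises OR their payloads */
	if (ioctl(fd, OKL4_LINK_SHBUF_IOCTL_IRQ_TX, &tx) < 0)
		perror("irq tx");

	/* wait for an incoming vIRQ, then fetch and clear its payload */
	pfd.fd = fd;
	pfd.events = POLLIN;
	if (poll(&pfd, 1, -1) > 0 &&
	    ioctl(fd, OKL4_LINK_SHBUF_IOCTL_IRQ_CLR, &rx) == 0)
		printf("payload 0x%llx\n", (unsigned long long)rx);

	close(fd);
	return 0;
}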
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index e7a31f8..3442a26 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -55,6 +55,7 @@
 	IPSTATS_MIB_ECT1PKTS,			/* InECT1Pkts */
 	IPSTATS_MIB_ECT0PKTS,			/* InECT0Pkts */
 	IPSTATS_MIB_CEPKTS,			/* InCEPkts */
+	IPSTATS_MIB_REASM_OVERLAPS,		/* ReasmOverlaps */
 	__IPSTATS_MIB_MAX
 };
 
diff --git a/include/vservices/Kbuild b/include/vservices/Kbuild
new file mode 100644
index 0000000..8b955fc
--- /dev/null
+++ b/include/vservices/Kbuild
@@ -0,0 +1,2 @@
+header-y += protocol/
+header-y += ioctl.h
diff --git a/include/vservices/buffer.h b/include/vservices/buffer.h
new file mode 100644
index 0000000..910aa07
--- /dev/null
+++ b/include/vservices/buffer.h
@@ -0,0 +1,239 @@
+/*
+ * include/vservices/buffer.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file defines simple wrapper types for strings and variable-size buffers
+ * that are stored inside Virtual Services message buffers.
+ */
+
+#ifndef _VSERVICES_BUFFER_H_
+#define _VSERVICES_BUFFER_H_
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+
+struct vs_mbuf;
+
+/**
+ * struct vs_string - Virtual Services fixed sized string type
+ * @ptr: String pointer
+ * @max_size: Maximum length of the string in bytes
+ *
+ * A handle to a possibly NUL-terminated string stored in a message buffer. If
+ * the size of the string equals max_size, the string is not NUL-terminated.
+ * If the protocol does not specify an encoding, the encoding is assumed to be
+ * UTF-8. Wide character encodings are not supported by this type; use struct
+ * vs_pbuf for wide character strings.
+ */
+struct vs_string {
+	char *ptr;
+	size_t max_size;
+};
+
+/**
+ * vs_string_copyout - Copy a Virtual Services string to a C string buffer.
+ * @dest: C string to copy to
+ * @src: Virtual Services string to copy from
+ * @max_size: Size of the destination buffer, including the NUL terminator.
+ *
+ * The behaviour is similar to strlcpy(): that is, the copied string
+ * is guaranteed not to exceed the specified size (including the NUL
+ * terminator byte), and is guaranteed to be NUL-terminated as long as
+ * the size is nonzero (unlike strncpy()).
+ *
+ * The return value is the size of the input string (even if the output was
+ * truncated); this is to make truncation easy to detect.
+ */
+static inline size_t
+vs_string_copyout(char *dest, const struct vs_string *src, size_t max_size)
+{
+	size_t src_len = strnlen(src->ptr, src->max_size);
+
+	if (max_size) {
+		size_t dest_len = min(src_len, max_size - 1);
+
+		memcpy(dest, src->ptr, dest_len);
+		dest[dest_len] = '\0';
+	}
+	return src_len;
+}
+
+/**
+ * vs_string_copyin_len - Copy a C string, up to a given length, into a Virtual
+ *                        Services string.
+ * @dest: Virtual Services string to copy to
+ * @src: C string to copy from
+ * @max_size: Maximum number of bytes to copy
+ *
+ * Returns the number of bytes copied, which may be less than the input
+ * string's length.
+ */
+static inline size_t
+vs_string_copyin_len(struct vs_string *dest, const char *src, size_t max_size)
+{
+	strncpy(dest->ptr, src, min(max_size, dest->max_size));
+
+	return strnlen(dest->ptr, dest->max_size);
+}
+
+/**
+ * vs_string_copyin - Copy a C string into a Virtual Services string.
+ * @dest: Virtual Services string to copy to
+ * @src: C string to copy from
+ *
+ * Returns the number of bytes copied, which may be less than the input
+ * string's length.
+ */
+static inline size_t
+vs_string_copyin(struct vs_string *dest, const char *src)
+{
+	return vs_string_copyin_len(dest, src, dest->max_size);
+}
+
+/**
+ * vs_string_length - Return the size of the string stored in a Virtual Services
+ *                    string.
+ * @str: Virtual Services string to get the length of
+ */
+static inline size_t
+vs_string_length(struct vs_string *str)
+{
+	return strnlen(str->ptr, str->max_size);
+}
+
+/**
+ * vs_string_dup - Allocate a C string buffer and copy a Virtual Services string
+ *                 into it.
+ * @str: Virtual Services string to duplicate
+ */
+static inline char *
+vs_string_dup(struct vs_string *str, gfp_t gfp)
+{
+	size_t len;
+	char *ret;
+
+	len = strnlen(str->ptr, str->max_size) + 1;
+	ret = kmalloc(len, gfp);
+	if (ret)
+		vs_string_copyout(ret, str, len);
+	return ret;
+}
+
+/**
+ * vs_string_max_size - Return the maximum size of a Virtual Services string,
+ *                      not including the NUL terminator if the length of the
+ *                      string is equal to max_size.
+ *
+ * @str: Virtual Services string to return the maximum size of.
+ *
+ * @return The maximum size of the string.
+ */
+static inline size_t
+vs_string_max_size(struct vs_string *str)
+{
+	return str->max_size;
+}
+
+/**
+ * struct vs_pbuf - Handle to a variable-size buffered payload.
+ * @data: Data buffer
+ * @size: Current size of the buffer
+ * @max_size: Maximum size of the buffer
+ *
+ * This is similar to struct vs_string, except that it has an explicitly
+ * stored size rather than being NUL-terminated. The functions that
+ * return ssize_t all return the new size of the modified buffer, and
+ * will return a negative size if the buffer overflows.
+ */
+struct vs_pbuf {
+	void *data;
+	size_t size, max_size;
+};
+
+/**
+ * vs_pbuf_size - Get the size of a pbuf
+ * @pbuf: pbuf to get the size of
+ */
+static inline size_t vs_pbuf_size(const struct vs_pbuf *pbuf)
+{
+	return pbuf->size;
+}
+
+/**
+ * vs_pbuf_data - Get the data pointer for a pbuf
+ * @pbuf: pbuf to get the data pointer for
+ */
+static inline const void *vs_pbuf_data(const struct vs_pbuf *pbuf)
+{
+	return pbuf->data;
+}
+
+/**
+ * vs_pbuf_resize - Resize a pbuf
+ * @pbuf: pbuf to resize
+ * @size: New size
+ */
+static inline ssize_t vs_pbuf_resize(struct vs_pbuf *pbuf, size_t size)
+{
+	if (size > pbuf->max_size)
+		return -EOVERFLOW;
+
+	pbuf->size = size;
+	return size;
+}
+
+/**
+ * vs_pbuf_copyin - Copy data into a pbuf
+ * @pbuf: pbuf to copy data into
+ * @offset: Offset to copy data to
+ * @data: Pointer to data to copy into the pbuf
+ * @nbytes: Number of bytes to copy into the pbuf
+ */
+static inline ssize_t vs_pbuf_copyin(struct vs_pbuf *pbuf, off_t offset,
+		const void *data, size_t nbytes)
+{
+	if (offset + nbytes > pbuf->size)
+		return -EOVERFLOW;
+
+	memcpy(pbuf->data + offset, data, nbytes);
+
+	return nbytes;
+}
+
+/**
+ * vs_pbuf_append - Append data to a pbuf
+ * @pbuf: pbuf to append to
+ * @data: Pointer to data to append to the pbuf
+ * @nbytes: Number of bytes to append
+ */
+static inline ssize_t vs_pbuf_append(struct vs_pbuf *pbuf,
+		const void *data, size_t nbytes)
+{
+	if (pbuf->size + nbytes > pbuf->max_size)
+		return -EOVERFLOW;
+
+	memcpy(pbuf->data + pbuf->size, data, nbytes);
+	pbuf->size += nbytes;
+
+	return pbuf->size;
+}
+
+/**
+ * vs_pbuf_dup_string - Duplicate the contents of a pbuf as a C string. The
+ * string is allocated and must be freed using kfree.
+ * @pbuf: pbuf to convert
+ * @gfp_flags: GFP flags for the string allocation
+ */
+static inline char *vs_pbuf_dup_string(struct vs_pbuf *pbuf, gfp_t gfp_flags)
+{
+	return kstrndup(pbuf->data, pbuf->size, gfp_flags);
+}
+
+#endif /* _VSERVICES_BUFFER_H_ */
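
A minimal sketch of the two idioms these helpers are built around: strlcpy-style truncation detection for vs_string, and bounds-checked writes for vs_pbuf (kernel context assumed; the string and pbuf would normally point into a message buffer):

static int example_buffer_usage(struct vs_string *name, struct vs_pbuf *pbuf)
{
	char buf[16];

	/* like strlcpy(): a return value >= sizeof(buf) means truncation */
	if (vs_string_copyout(buf, name, sizeof(buf)) >= sizeof(buf))
		pr_warn("service name truncated\n");

	/* appends fail with -EOVERFLOW instead of overrunning max_size */
	if (vs_pbuf_append(pbuf, "hello", 5) < 0)
		return -EOVERFLOW;

	return 0;
}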
diff --git a/include/vservices/ioctl.h b/include/vservices/ioctl.h
new file mode 100644
index 0000000..d96fcab
--- /dev/null
+++ b/include/vservices/ioctl.h
@@ -0,0 +1,48 @@
+/*
+ * vservices/ioctl.h - Interface to service character devices
+ *
+ * Copyright (c) 2016, Cog Systems Pty Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_PUBLIC_VSERVICES_IOCTL_H__
+#define __LINUX_PUBLIC_VSERVICES_IOCTL_H__
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+
+/* ioctls that work on any opened service device */
+#define IOCTL_VS_RESET_SERVICE		_IO('4', 0)
+#define IOCTL_VS_GET_NAME		_IOR('4', 1, char[16])
+#define IOCTL_VS_GET_PROTOCOL		_IOR('4', 2, char[32])
+
+/*
+ * Claim a device for user I/O (if no kernel driver is attached). The claim
+ * persists until the char device is closed.
+ */
+struct vs_ioctl_bind {
+	__u32 send_quota;
+	__u32 recv_quota;
+	__u32 send_notify_bits;
+	__u32 recv_notify_bits;
+	size_t msg_size;
+};
+#define IOCTL_VS_BIND_CLIENT _IOR('4', 3, struct vs_ioctl_bind)
+#define IOCTL_VS_BIND_SERVER _IOWR('4', 4, struct vs_ioctl_bind)
+
+/* send and receive messages and notifications */
+#define IOCTL_VS_NOTIFY _IOW('4', 5, __u32)
+struct vs_ioctl_iovec {
+	union {
+		__u32 iovcnt; /* input */
+		__u32 notify_bits; /* output (recv only) */
+	};
+	struct iovec *iov;
+};
+#define IOCTL_VS_SEND _IOW('4', 6, struct vs_ioctl_iovec)
+#define IOCTL_VS_RECV _IOWR('4', 7, struct vs_ioctl_iovec)
+
+#endif /* __LINUX_PUBLIC_VSERVICES_IOCTL_H__ */
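
A hedged userspace sketch of the client side of this interface: claim the service with IOCTL_VS_BIND_CLIENT, then hand a single iovec to IOCTL_VS_SEND. The device path is hypothetical; the quotas and maximum message size are whatever the bind ioctl reports.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/uio.h>
#include <vservices/ioctl.h>

int main(void)
{
	struct vs_ioctl_bind bind;
	char hello[] = "hello";
	struct iovec iov = { .iov_base = hello, .iov_len = sizeof(hello) };
	struct vs_ioctl_iovec vio = { .iovcnt = 1, .iov = &iov };
	int fd = open("/dev/vservices/serial:0", O_RDWR); /* hypothetical path */

	if (fd < 0)
		return 1;

	if (ioctl(fd, IOCTL_VS_BIND_CLIENT, &bind) < 0)
		return 1;
	printf("msg_size=%zu send_quota=%u\n", bind.msg_size, bind.send_quota);

	if (sizeof(hello) <= bind.msg_size &&
	    ioctl(fd, IOCTL_VS_SEND, &vio) < 0)
		perror("send");

	close(fd);
	return 0;
}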
diff --git a/include/vservices/protocol/Kbuild b/include/vservices/protocol/Kbuild
new file mode 100644
index 0000000..374d9b6
--- /dev/null
+++ b/include/vservices/protocol/Kbuild
@@ -0,0 +1,12 @@
+#
+# Find all of the protocol directory names, and get the basename followed
+# by a trailing slash.
+#
+protocols=$(shell find include/vservices/protocol/ -mindepth 1 -type d -exec basename {} \;)
+protocol_dirs=$(foreach p, $(protocols), $(p)/)
+
+#
+# Export the headers for all protocols. The kbuild file in each protocol
+# directory specifies exactly which headers to export.
+#
+header-y += $(protocol_dirs)
diff --git a/include/vservices/protocol/block/Kbuild b/include/vservices/protocol/block/Kbuild
new file mode 100644
index 0000000..ec3cbe8
--- /dev/null
+++ b/include/vservices/protocol/block/Kbuild
@@ -0,0 +1 @@
+header-y += types.h
diff --git a/include/vservices/protocol/block/client.h b/include/vservices/protocol/block/client.h
new file mode 100644
index 0000000..4cd2847
--- /dev/null
+++ b/include/vservices/protocol/block/client.h
@@ -0,0 +1,175 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(__VSERVICES_CLIENT_BLOCK__)
+#define __VSERVICES_CLIENT_BLOCK__
+
+struct vs_service_device;
+struct vs_client_block_state;
+
+struct vs_client_block {
+
+	/*
+	 * If set to false then the receive message handlers are run from
+	 * workqueue context and are allowed to sleep. If set to true the
+	 * message handlers are run from tasklet context and may not sleep.
+	 */
+	bool rx_atomic;
+
+	/*
+	 * If this is set to true along with rx_atomic, the driver is allowed
+	 * to send messages from softirq contexts other than the receive
+	 * message handlers, after calling vs_service_state_lock_bh. Otherwise,
+	 * messages may only be sent from the receive message handlers, or
+	 * from task context after calling vs_service_state_lock. This must
+	 * not be set to true if rx_atomic is set to false.
+	 */
+	bool tx_atomic;
+    /** session setup **/
+	struct vs_client_block_state *(*alloc) (struct vs_service_device *
+						service);
+	void (*release) (struct vs_client_block_state * _state);
+
+	struct vs_service_driver *driver;
+
+/** Opened, reopened and closed functions **/
+
+	void (*opened) (struct vs_client_block_state * _state);
+
+	void (*reopened) (struct vs_client_block_state * _state);
+
+	void (*closed) (struct vs_client_block_state * _state);
+
+/** Send/receive state callbacks **/
+	int (*tx_ready) (struct vs_client_block_state * _state);
+
+	struct {
+		int (*ack_read) (struct vs_client_block_state * _state,
+				 void *_opaque, struct vs_pbuf data,
+				 struct vs_mbuf * _mbuf);
+		int (*nack_read) (struct vs_client_block_state * _state,
+				  void *_opaque,
+				  vservice_block_block_io_error_t err);
+
+		int (*ack_write) (struct vs_client_block_state * _state,
+				  void *_opaque);
+		int (*nack_write) (struct vs_client_block_state * _state,
+				   void *_opaque,
+				   vservice_block_block_io_error_t err);
+
+	} io;
+};
+
+struct vs_client_block_state {
+	vservice_block_state_t state;
+	bool readonly;
+	uint32_t sector_size;
+	uint32_t segment_size;
+	uint64_t device_sectors;
+	bool flushable;
+	bool committable;
+	struct {
+		uint32_t sector_size;
+		uint32_t segment_size;
+	} io;
+	struct vs_service_device *service;
+	bool released;
+};
+
+extern int vs_client_block_reopen(struct vs_client_block_state *_state);
+
+extern int vs_client_block_close(struct vs_client_block_state *_state);
+
+    /** interface block_io **/
+/* command parallel read */
+extern int vs_client_block_io_getbufs_ack_read(struct vs_client_block_state
+					       *_state, struct vs_pbuf *data,
+					       struct vs_mbuf *_mbuf);
+extern int vs_client_block_io_free_ack_read(struct vs_client_block_state
+					    *_state, struct vs_pbuf *data,
+					    struct vs_mbuf *_mbuf);
+extern int vs_client_block_io_req_read(struct vs_client_block_state *_state,
+				       void *_opaque, uint64_t sector_index,
+				       uint32_t num_sects, bool nodelay,
+				       bool flush, gfp_t flags);
+
+	/* command parallel write */
+extern struct vs_mbuf *vs_client_block_io_alloc_req_write(struct
+							  vs_client_block_state
+							  *_state,
+							  struct vs_pbuf *data,
+							  gfp_t flags);
+extern int vs_client_block_io_free_req_write(struct vs_client_block_state
+					     *_state, struct vs_pbuf *data,
+					     struct vs_mbuf *_mbuf);
+extern int vs_client_block_io_req_write(struct vs_client_block_state *_state,
+					void *_opaque, uint64_t sector_index,
+					uint32_t num_sects, bool nodelay,
+					bool flush, bool commit,
+					struct vs_pbuf data,
+					struct vs_mbuf *_mbuf);
+
+/* Status APIs for async parallel commands */
+static inline bool vs_client_block_io_req_read_can_send(struct
+							vs_client_block_state
+							*_state)
+{
+	return !bitmap_full(_state->state.io.read_bitmask,
+			    VSERVICE_BLOCK_IO_READ_MAX_PENDING);
+}
+
+static inline bool vs_client_block_io_req_read_is_pending(struct
+							  vs_client_block_state
+							  *_state)
+{
+	return !bitmap_empty(_state->state.io.read_bitmask,
+			     VSERVICE_BLOCK_IO_READ_MAX_PENDING);
+}
+
+static inline bool vs_client_block_io_req_write_can_send(struct
+							 vs_client_block_state
+							 *_state)
+{
+	return !bitmap_full(_state->state.io.write_bitmask,
+			    VSERVICE_BLOCK_IO_WRITE_MAX_PENDING);
+}
+
+static inline bool vs_client_block_io_req_write_is_pending(struct
+							   vs_client_block_state
+							   *_state)
+{
+	return !bitmap_empty(_state->state.io.write_bitmask,
+			     VSERVICE_BLOCK_IO_WRITE_MAX_PENDING);
+}
+
+/** Module registration **/
+
+struct module;
+
+extern int __vservice_block_client_register(struct vs_client_block *client,
+					    const char *name,
+					    struct module *owner);
+
+static inline int vservice_block_client_register(struct vs_client_block *client,
+						 const char *name)
+{
+#ifdef MODULE
+	extern struct module __this_module;
+	struct module *this_module = &__this_module;
+#else
+	struct module *this_module = NULL;
+#endif
+
+	return __vservice_block_client_register(client, name, this_module);
+}
+
+extern int vservice_block_client_unregister(struct vs_client_block *client);
+
+#endif				/* ! __VSERVICES_CLIENT_BLOCK__ */
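
A hedged sketch of how a client driver would use the pending-slot check above before issuing a parallel read; "tag" is the caller-chosen opaque pointer that is later handed back to the ack_read/nack_read callbacks:

static int example_submit_read(struct vs_client_block_state *state,
			       void *tag, uint64_t sector, uint32_t nsects)
{
	/* all VSERVICE_BLOCK_IO_READ_MAX_PENDING slots may be in flight */
	if (!vs_client_block_io_req_read_can_send(state))
		return -EBUSY;

	return vs_client_block_io_req_read(state, tag, sector, nsects,
					   false /* nodelay */,
					   false /* flush */,
					   GFP_KERNEL);
}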
diff --git a/include/vservices/protocol/block/common.h b/include/vservices/protocol/block/common.h
new file mode 100644
index 0000000..2779b18
--- /dev/null
+++ b/include/vservices/protocol/block/common.h
@@ -0,0 +1,42 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(__VSERVICES_BLOCK_PROTOCOL_H__)
+#define __VSERVICES_BLOCK_PROTOCOL_H__
+
+#define VSERVICE_BLOCK_PROTOCOL_NAME "com.ok-labs.block"
+typedef enum {
+	VSERVICE_BLOCK_BASE_REQ_OPEN,
+	VSERVICE_BLOCK_BASE_ACK_OPEN,
+	VSERVICE_BLOCK_BASE_NACK_OPEN,
+	VSERVICE_BLOCK_BASE_REQ_CLOSE,
+	VSERVICE_BLOCK_BASE_ACK_CLOSE,
+	VSERVICE_BLOCK_BASE_NACK_CLOSE,
+	VSERVICE_BLOCK_BASE_REQ_REOPEN,
+	VSERVICE_BLOCK_BASE_ACK_REOPEN,
+	VSERVICE_BLOCK_BASE_NACK_REOPEN,
+	VSERVICE_BLOCK_BASE_MSG_RESET,
+	VSERVICE_BLOCK_IO_REQ_READ,
+	VSERVICE_BLOCK_IO_ACK_READ,
+	VSERVICE_BLOCK_IO_NACK_READ,
+	VSERVICE_BLOCK_IO_REQ_WRITE,
+	VSERVICE_BLOCK_IO_ACK_WRITE,
+	VSERVICE_BLOCK_IO_NACK_WRITE,
+} vservice_block_message_id_t;
+typedef enum {
+	VSERVICE_BLOCK_NBIT_IN__COUNT
+} vservice_block_nbit_in_t;
+
+typedef enum {
+	VSERVICE_BLOCK_NBIT_OUT__COUNT
+} vservice_block_nbit_out_t;
+
+/* Notification mask macros */
+#endif				/* ! __VSERVICES_BLOCK_PROTOCOL_H__ */
diff --git a/include/vservices/protocol/block/server.h b/include/vservices/protocol/block/server.h
new file mode 100644
index 0000000..65b0bfd
--- /dev/null
+++ b/include/vservices/protocol/block/server.h
@@ -0,0 +1,177 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(VSERVICES_SERVER_BLOCK)
+#define VSERVICES_SERVER_BLOCK
+
+struct vs_service_device;
+struct vs_server_block_state;
+
+struct vs_server_block {
+
+	/*
+	 * If set to false then the receive message handlers are run from
+	 * workqueue context and are allowed to sleep. If set to true the
+	 * message handlers are run from tasklet context and may not sleep.
+	 */
+	bool rx_atomic;
+
+	/*
+	 * If this is set to true along with rx_atomic, the driver is allowed
+	 * to send messages from softirq contexts other than the receive
+	 * message handlers, after calling vs_service_state_lock_bh. Otherwise,
+	 * messages may only be sent from the receive message handlers, or
+	 * from task context after calling vs_service_state_lock. This must
+	 * not be set to true if rx_atomic is set to false.
+	 */
+	bool tx_atomic;
+
+	/*
+	 * These are the driver's recommended message quotas. They are used
+	 * by the core service to select message quotas for services with no
+	 * explicitly configured quotas.
+	 */
+	u32 in_quota_best;
+	u32 out_quota_best;
+    /** session setup **/
+	struct vs_server_block_state *(*alloc) (struct vs_service_device *
+						service);
+	void (*release) (struct vs_server_block_state * _state);
+
+	struct vs_service_driver *driver;
+
+/** Open, reopen, close and closed functions **/
+
+	 vs_server_response_type_t(*open) (struct vs_server_block_state *
+					   _state);
+
+	 vs_server_response_type_t(*reopen) (struct vs_server_block_state *
+					     _state);
+
+	 vs_server_response_type_t(*close) (struct vs_server_block_state *
+					    _state);
+
+	void (*closed) (struct vs_server_block_state * _state);
+
+/** Send/receive state callbacks **/
+	int (*tx_ready) (struct vs_server_block_state * _state);
+
+	struct {
+		int (*req_read) (struct vs_server_block_state * _state,
+				 uint32_t _opaque, uint64_t sector_index,
+				 uint32_t num_sects, bool nodelay, bool flush);
+
+		int (*req_write) (struct vs_server_block_state * _state,
+				  uint32_t _opaque, uint64_t sector_index,
+				  uint32_t num_sects, bool nodelay, bool flush,
+				  bool commit, struct vs_pbuf data,
+				  struct vs_mbuf * _mbuf);
+
+	} io;
+};
+
+struct vs_server_block_state {
+	vservice_block_state_t state;
+	bool readonly;
+	uint32_t sector_size;
+	uint32_t segment_size;
+	uint64_t device_sectors;
+	bool flushable;
+	bool committable;
+	struct {
+		uint32_t sector_size;
+		uint32_t segment_size;
+	} io;
+	struct vs_service_device *service;
+	bool released;
+};
+
+/** Complete calls for server core functions **/
+extern int vs_server_block_open_complete(struct vs_server_block_state *_state,
+					 vs_server_response_type_t resp);
+
+extern int vs_server_block_close_complete(struct vs_server_block_state *_state,
+					  vs_server_response_type_t resp);
+
+extern int vs_server_block_reopen_complete(struct vs_server_block_state *_state,
+					   vs_server_response_type_t resp);
+
+    /** interface block_io **/
+/* command parallel read */
+extern struct vs_mbuf *vs_server_block_io_alloc_ack_read(struct
+							 vs_server_block_state
+							 *_state,
+							 struct vs_pbuf *data,
+							 gfp_t flags);
+extern int vs_server_block_io_free_ack_read(struct vs_server_block_state
+					    *_state, struct vs_pbuf *data,
+					    struct vs_mbuf *_mbuf);
+extern int vs_server_block_io_send_ack_read(struct vs_server_block_state
+					    *_state, uint32_t _opaque,
+					    struct vs_pbuf data,
+					    struct vs_mbuf *_mbuf);
+extern int vs_server_block_io_send_nack_read(struct vs_server_block_state
+					     *_state, uint32_t _opaque,
+					     vservice_block_block_io_error_t
+					     err, gfp_t flags);
+    /* command parallel write */
+extern int vs_server_block_io_getbufs_req_write(struct vs_server_block_state
+						*_state, struct vs_pbuf *data,
+						struct vs_mbuf *_mbuf);
+extern int vs_server_block_io_free_req_write(struct vs_server_block_state
+					     *_state, struct vs_pbuf *data,
+					     struct vs_mbuf *_mbuf);
+extern int vs_server_block_io_send_ack_write(struct vs_server_block_state
+					     *_state, uint32_t _opaque,
+					     gfp_t flags);
+extern int vs_server_block_io_send_nack_write(struct vs_server_block_state
+					      *_state, uint32_t _opaque,
+					      vservice_block_block_io_error_t
+					      err, gfp_t flags);
+
+static inline bool vs_server_block_io_send_ack_read_is_pending(struct
+							       vs_server_block_state
+							       *_state)
+{
+	return !bitmap_empty(_state->state.io.read_bitmask,
+			     VSERVICE_BLOCK_IO_READ_MAX_PENDING);
+}
+
+static inline bool vs_server_block_io_send_ack_write_is_pending(struct
+								vs_server_block_state
+								*_state)
+{
+	return !bitmap_empty(_state->state.io.write_bitmask,
+			     VSERVICE_BLOCK_IO_WRITE_MAX_PENDING);
+}
+
+/** Module registration **/
+
+struct module;
+
+extern int __vservice_block_server_register(struct vs_server_block *server,
+					    const char *name,
+					    struct module *owner);
+
+static inline int vservice_block_server_register(struct vs_server_block *server,
+						 const char *name)
+{
+#ifdef MODULE
+	extern struct module __this_module;
+	struct module *this_module = &__this_module;
+#else
+	struct module *this_module = NULL;
+#endif
+
+	return __vservice_block_server_register(server, name, this_module);
+}
+
+extern int vservice_block_server_unregister(struct vs_server_block *server);
+#endif				/* ! VSERVICES_SERVER_BLOCK */
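
A hedged sketch of a server completing a read request: allocate the reply mbuf and pbuf, size and fill the payload, then send the ack. The allocator's failure convention and the data source are assumptions, not taken from this patch.

static int example_complete_read(struct vs_server_block_state *state,
				 uint32_t tag, const void *src, size_t len)
{
	struct vs_pbuf data;
	struct vs_mbuf *mbuf;

	mbuf = vs_server_block_io_alloc_ack_read(state, &data, GFP_KERNEL);
	if (IS_ERR_OR_NULL(mbuf))	/* failure convention assumed */
		return -ENOMEM;

	if (vs_pbuf_resize(&data, len) < 0 ||
	    vs_pbuf_copyin(&data, 0, src, len) < 0) {
		vs_server_block_io_free_ack_read(state, &data, mbuf);
		return -EOVERFLOW;
	}

	return vs_server_block_io_send_ack_read(state, tag, data, mbuf);
}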
diff --git a/include/vservices/protocol/block/types.h b/include/vservices/protocol/block/types.h
new file mode 100644
index 0000000..52845a3
--- /dev/null
+++ b/include/vservices/protocol/block/types.h
@@ -0,0 +1,106 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(VSERVICES_BLOCK_TYPES_H)
+#define VSERVICES_BLOCK_TYPES_H
+
+#define VSERVICE_BLOCK_IO_READ_MAX_PENDING 1024
+#define VSERVICE_BLOCK_IO_WRITE_MAX_PENDING 1024
+
+typedef enum vservice_block_block_io_error {
+	VSERVICE_BLOCK_INVALID_INDEX,
+	VSERVICE_BLOCK_MEDIA_FAILURE,
+	VSERVICE_BLOCK_MEDIA_TIMEOUT,
+	VSERVICE_BLOCK_UNSUPPORTED_COMMAND,
+	VSERVICE_BLOCK_SERVICE_RESET
+} vservice_block_block_io_error_t;
+
+typedef enum {
+/* state closed */
+	VSERVICE_BASE_STATE_CLOSED = 0,
+	VSERVICE_BASE_STATE_CLOSED__OPEN,
+	VSERVICE_BASE_STATE_CLOSED__CLOSE,
+	VSERVICE_BASE_STATE_CLOSED__REOPEN,
+
+/* state running */
+	VSERVICE_BASE_STATE_RUNNING,
+	VSERVICE_BASE_STATE_RUNNING__OPEN,
+	VSERVICE_BASE_STATE_RUNNING__CLOSE,
+	VSERVICE_BASE_STATE_RUNNING__REOPEN,
+
+	VSERVICE_BASE__RESET = VSERVICE_BASE_STATE_CLOSED
+} vservice_base_statenum_t;
+
+typedef struct {
+	vservice_base_statenum_t statenum;
+} vservice_base_state_t;
+
+#define VSERVICE_BASE_RESET_STATE (vservice_base_state_t) { \
+.statenum = VSERVICE_BASE__RESET}
+
+#define VSERVICE_BASE_STATE_IS_CLOSED(state) (\
+((state).statenum == VSERVICE_BASE_STATE_CLOSED) || \
+((state).statenum == VSERVICE_BASE_STATE_CLOSED__OPEN) || \
+((state).statenum == VSERVICE_BASE_STATE_CLOSED__CLOSE) || \
+((state).statenum == VSERVICE_BASE_STATE_CLOSED__REOPEN))
+
+#define VSERVICE_BASE_STATE_IS_RUNNING(state) (\
+((state).statenum == VSERVICE_BASE_STATE_RUNNING) || \
+((state).statenum == VSERVICE_BASE_STATE_RUNNING__OPEN) || \
+((state).statenum == VSERVICE_BASE_STATE_RUNNING__CLOSE) || \
+((state).statenum == VSERVICE_BASE_STATE_RUNNING__REOPEN))
+
+#define VSERVICE_BASE_STATE_VALID(state) ( \
+VSERVICE_BASE_STATE_IS_CLOSED(state) ? true : \
+VSERVICE_BASE_STATE_IS_RUNNING(state) ? true : \
+false)
+
+static inline const char *vservice_base_get_state_string(vservice_base_state_t
+							 state)
+{
+	static const char *names[] =
+	    { "closed", "closed__open", "closed__close", "closed__reopen",
+		"running", "running__open", "running__close", "running__reopen"
+	};
+	if (!VSERVICE_BASE_STATE_VALID(state)) {
+		return "INVALID";
+	}
+	return names[state.statenum];
+}
+
+typedef struct {
+	DECLARE_BITMAP(read_bitmask, VSERVICE_BLOCK_IO_READ_MAX_PENDING);
+	void *read_tags[VSERVICE_BLOCK_IO_READ_MAX_PENDING];
+	 DECLARE_BITMAP(write_bitmask, VSERVICE_BLOCK_IO_WRITE_MAX_PENDING);
+	void *write_tags[VSERVICE_BLOCK_IO_WRITE_MAX_PENDING];
+} vservice_block_io_state_t;
+
+#define VSERVICE_BLOCK_IO_RESET_STATE (vservice_block_io_state_t) { \
+.read_bitmask = {0}, \
+.read_tags = {NULL}, \
+.write_bitmask = {0}, \
+.write_tags = {NULL}}
+
+#define VSERVICE_BLOCK_IO_STATE_VALID(state) true
+
+typedef struct {
+
+	vservice_base_state_t base;
+
+	vservice_block_io_state_t io;
+} vservice_block_state_t;
+
+#define VSERVICE_BLOCK_RESET_STATE (vservice_block_state_t) {\
+.base = VSERVICE_BASE_RESET_STATE,\
+.io = VSERVICE_BLOCK_IO_RESET_STATE }
+
+#define VSERVICE_BLOCK_IS_STATE_RESET(state) \
+            ((state).base.statenum == VSERVICE_BASE__RESET)
+#endif				/* ! VSERVICES_BLOCK_TYPES_H */
diff --git a/include/vservices/protocol/core.h b/include/vservices/protocol/core.h
new file mode 100644
index 0000000..3a86af5
--- /dev/null
+++ b/include/vservices/protocol/core.h
@@ -0,0 +1,145 @@
+/*
+ * include/vservices/protocol/core.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * These are the common generated definitions for the core protocol drivers;
+ * specifically the message IDs and the protocol state representation.
+ *
+ * This is currently hand-generated, but will eventually be autogenerated
+ * from the protocol specifications in core.vs. Please keep it consistent
+ * with that file.
+ */
+
+#define VSERVICE_CORE_PROTOCOL_NAME "com.ok-labs.core"
+#define VSERVICE_CORE_PARAM_SIZE_SERVICE_INFO__PROTOCOL_NAME 32
+#define VSERVICE_CORE_PARAM_SIZE_SERVICE_INFO__SERVICE_NAME 16
+
+/*
+ * Identifiers for in-band messages.
+ *
+ * This definition applies in both directions, because there is no practical
+ * limit on message IDs (services are unlikely to define 2^16 distinct message
+ * names).
+ */
+typedef enum {
+	/** simple_protocol core **/
+	/* message out startup */
+	VSERVICE_CORE_MSG_STARTUP,
+
+	/* message out shutdown */
+	VSERVICE_CORE_MSG_SHUTDOWN,
+
+	/* command in sync connect */
+	VSERVICE_CORE_REQ_CONNECT,
+	VSERVICE_CORE_ACK_CONNECT,
+	VSERVICE_CORE_NACK_CONNECT,
+
+	/* command in sync disconnect */
+	VSERVICE_CORE_REQ_DISCONNECT,
+	VSERVICE_CORE_ACK_DISCONNECT,
+	VSERVICE_CORE_NACK_DISCONNECT,
+
+	/* command in service_count */
+	VSERVICE_CORE_REQ_SERVICE_COUNT,
+	VSERVICE_CORE_ACK_SERVICE_COUNT,
+	VSERVICE_CORE_NACK_SERVICE_COUNT,
+
+	/* command in queued service_info */
+	VSERVICE_CORE_REQ_SERVICE_INFO,
+	VSERVICE_CORE_ACK_SERVICE_INFO,
+	VSERVICE_CORE_NACK_SERVICE_INFO,
+
+	/* message inout service_reset */
+	VSERVICE_CORE_MSG_SERVICE_RESET,
+
+	/* message inout service_ready */
+	VSERVICE_CORE_MSG_SERVICE_READY,
+
+	/* message out notification bits */
+	VSERVICE_CORE_MSG_NOTIFICATION_BITS_INFO,
+
+} vservice_core_message_id_t;
+
+/*
+ * Notification bits are defined separately for each direction because there
+ * is relatively limited space to allocate them from (specifically, the bits in
+ * a machine word). It is unlikely but possible for a protocol to reach this
+ * limit.
+ */
+
+/* Bits in the in (client -> server) notification bitmask. */
+typedef enum {
+	/** simple_protocol core **/
+	/* No in notifications */
+
+	VSERVICE_CORE_NBIT_IN__COUNT = 0,
+} vservice_core_nbit_in_t;
+
+/* Masks for the in notification bits */
+/* No in notifications */
+
+/* Bits in the out (server -> client) notification bitmask. */
+typedef enum {
+	/** simple_protocol core **/
+	/* notification out reenumerate */
+	VSERVICE_CORE_NBIT_OUT_REENUMERATE = 0,
+
+	VSERVICE_CORE_NBIT_OUT__COUNT,
+} vservice_core_nbit_out_t;
+
+/* Masks for the out notification bits */
+#define VSERVICE_CORE_NMASK_OUT_REENUMERATE \
+		(1 << VSERVICE_CORE_NBIT_OUT_REENUMERATE)
+
+/* Valid states of the interface's generated state machine. */
+typedef enum {
+	/* state offline */
+	VSERVICE_CORE_STATE_OFFLINE = 0,
+
+	/* state disconnected */
+	VSERVICE_CORE_STATE_DISCONNECTED,
+	VSERVICE_CORE_STATE_DISCONNECTED__CONNECT,
+
+	/* state connected */
+	VSERVICE_CORE_STATE_CONNECTED,
+	VSERVICE_CORE_STATE_CONNECTED__DISCONNECT,
+
+	/* reset offline */
+	VSERVICE_CORE_STATE__RESET = VSERVICE_CORE_STATE_OFFLINE,
+} vservice_core_statenum_t;
+
+typedef struct {
+	vservice_core_statenum_t statenum;
+	bool pending_service_count;
+	unsigned pending_service_info;
+} vservice_core_state_t;
+
+#define VSERVICE_CORE_RESET_STATE (vservice_core_state_t) { \
+	.statenum = VSERVICE_CORE_STATE__RESET, \
+	.pending_service_count = false, \
+	.pending_service_info = 0 }
+
+#define VSERVICE_CORE_STATE_IS_OFFLINE(state) ( \
+	((state).statenum == VSERVICE_CORE_STATE_OFFLINE))
+#define VSERVICE_CORE_STATE_IS_DISCONNECTED(state) ( \
+	((state).statenum == VSERVICE_CORE_STATE_DISCONNECTED) || \
+	((state).statenum == VSERVICE_CORE_STATE_DISCONNECTED__CONNECT))
+#define VSERVICE_CORE_STATE_IS_CONNECTED(state) ( \
+	((state).statenum == VSERVICE_CORE_STATE_CONNECTED) || \
+	((state).statenum == VSERVICE_CORE_STATE_CONNECTED__DISCONNECT))
+
+#define VSERVICE_CORE_STATE_VALID(state) \
+	VSERVICE_CORE_STATE_IS_OFFLINE(state) ? ( \
+		((state).pending_service_count == false) && \
+		((state).pending_service_info == 0)) : \
+	VSERVICE_CORE_STATE_IS_DISCONNECTED(state) ? ( \
+		((state).pending_service_count == false) && \
+		((state).pending_service_info == 0)) : \
+	VSERVICE_CORE_STATE_IS_CONNECTED(state) ? true : \
+	false)
diff --git a/include/vservices/protocol/core/Kbuild b/include/vservices/protocol/core/Kbuild
new file mode 100644
index 0000000..ec3cbe8
--- /dev/null
+++ b/include/vservices/protocol/core/Kbuild
@@ -0,0 +1 @@
+header-y += types.h
diff --git a/include/vservices/protocol/core/client.h b/include/vservices/protocol/core/client.h
new file mode 100644
index 0000000..3d52999
--- /dev/null
+++ b/include/vservices/protocol/core/client.h
@@ -0,0 +1,155 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(__VSERVICES_CLIENT_CORE__)
+#define __VSERVICES_CLIENT_CORE__
+
+struct vs_service_device;
+struct vs_client_core_state;
+
+struct vs_client_core {
+
+	/*
+	 * If set to false then the receive message handlers are run from
+	 * workqueue context and are allowed to sleep. If set to true the
+	 * message handlers are run from tasklet context and may not sleep.
+	 */
+	bool rx_atomic;
+
+	/*
+	 * If this is set to true along with rx_atomic, the driver is allowed
+	 * to send messages from softirq contexts other than the receive
+	 * message handlers, after calling vs_service_state_lock_bh. Otherwise,
+	 * messages may only be sent from the receive message handlers, or
+	 * from task context after calling vs_service_state_lock. This must
+	 * not be set to true if rx_atomic is set to false.
+	 */
+	bool tx_atomic;
+    /** session setup **/
+	struct vs_client_core_state *(*alloc) (struct vs_service_device *
+					       service);
+	void (*release) (struct vs_client_core_state * _state);
+
+	struct vs_service_driver *driver;
+
+	/** Core service base interface **/
+	void (*start) (struct vs_client_core_state * _state);
+	void (*reset) (struct vs_client_core_state * _state);
+    /** Send/receive state callbacks **/
+	int (*tx_ready) (struct vs_client_core_state * _state);
+
+	struct {
+		int (*state_change) (struct vs_client_core_state * _state,
+				     vservice_core_statenum_t old,
+				     vservice_core_statenum_t new);
+
+		int (*ack_connect) (struct vs_client_core_state * _state);
+		int (*nack_connect) (struct vs_client_core_state * _state);
+
+		int (*ack_disconnect) (struct vs_client_core_state * _state);
+		int (*nack_disconnect) (struct vs_client_core_state * _state);
+
+		int (*msg_startup) (struct vs_client_core_state * _state,
+				    uint32_t core_in_quota,
+				    uint32_t core_out_quota);
+
+		int (*msg_shutdown) (struct vs_client_core_state * _state);
+
+		int (*msg_service_created) (struct vs_client_core_state *
+					    _state, uint32_t service_id,
+					    struct vs_string service_name,
+					    struct vs_string protocol_name,
+					    struct vs_mbuf * _mbuf);
+
+		int (*msg_service_removed) (struct vs_client_core_state *
+					    _state, uint32_t service_id);
+
+		int (*msg_server_ready) (struct vs_client_core_state * _state,
+					 uint32_t service_id, uint32_t in_quota,
+					 uint32_t out_quota,
+					 uint32_t in_bit_offset,
+					 uint32_t in_num_bits,
+					 uint32_t out_bit_offset,
+					 uint32_t out_num_bits);
+
+		int (*msg_service_reset) (struct vs_client_core_state * _state,
+					  uint32_t service_id);
+
+	} core;
+};
+
+struct vs_client_core_state {
+	vservice_core_protocol_state_t state;
+	struct vs_service_device *service;
+	bool released;
+};
+
+extern int vs_client_core_reopen(struct vs_client_core_state *_state);
+
+extern int vs_client_core_close(struct vs_client_core_state *_state);
+
+    /** interface core **/
+/* command sync connect */
+extern int vs_client_core_core_req_connect(struct vs_client_core_state *_state,
+					   gfp_t flags);
+
+	/* command sync disconnect */
+extern int vs_client_core_core_req_disconnect(struct vs_client_core_state
+					      *_state, gfp_t flags);
+
+	/* message startup */
+/* message shutdown */
+/* message service_created */
+extern int vs_client_core_core_getbufs_service_created(struct
+						       vs_client_core_state
+						       *_state,
+						       struct vs_string
+						       *service_name,
+						       struct vs_string
+						       *protocol_name,
+						       struct vs_mbuf *_mbuf);
+extern int vs_client_core_core_free_service_created(struct vs_client_core_state
+						    *_state,
+						    struct vs_string
+						    *service_name,
+						    struct vs_string
+						    *protocol_name,
+						    struct vs_mbuf *_mbuf);
+    /* message service_removed */
+/* message server_ready */
+/* message service_reset */
+extern int vs_client_core_core_send_service_reset(struct vs_client_core_state
+						  *_state, uint32_t service_id,
+						  gfp_t flags);
+
+/** Module registration **/
+
+struct module;
+
+extern int __vservice_core_client_register(struct vs_client_core *client,
+					   const char *name,
+					   struct module *owner);
+
+static inline int vservice_core_client_register(struct vs_client_core *client,
+						const char *name)
+{
+#ifdef MODULE
+	extern struct module __this_module;
+	struct module *this_module = &__this_module;
+#else
+	struct module *this_module = NULL;
+#endif
+
+	return __vservice_core_client_register(client, name, this_module);
+}
+
+extern int vservice_core_client_unregister(struct vs_client_core *client);
+
+#endif				/* ! __VSERVICES_CLIENT_CORE__ */
diff --git a/include/vservices/protocol/core/common.h b/include/vservices/protocol/core/common.h
new file mode 100644
index 0000000..b496416
--- /dev/null
+++ b/include/vservices/protocol/core/common.h
@@ -0,0 +1,38 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(__VSERVICES_CORE_PROTOCOL_H__)
+#define __VSERVICES_CORE_PROTOCOL_H__
+
+#define VSERVICE_CORE_PROTOCOL_NAME "com.ok-labs.core"
+typedef enum {
+	VSERVICE_CORE_CORE_REQ_CONNECT,
+	VSERVICE_CORE_CORE_ACK_CONNECT,
+	VSERVICE_CORE_CORE_NACK_CONNECT,
+	VSERVICE_CORE_CORE_REQ_DISCONNECT,
+	VSERVICE_CORE_CORE_ACK_DISCONNECT,
+	VSERVICE_CORE_CORE_NACK_DISCONNECT,
+	VSERVICE_CORE_CORE_MSG_STARTUP,
+	VSERVICE_CORE_CORE_MSG_SHUTDOWN,
+	VSERVICE_CORE_CORE_MSG_SERVICE_CREATED,
+	VSERVICE_CORE_CORE_MSG_SERVICE_REMOVED,
+	VSERVICE_CORE_CORE_MSG_SERVER_READY,
+	VSERVICE_CORE_CORE_MSG_SERVICE_RESET,
+} vservice_core_message_id_t;
+typedef enum {
+	VSERVICE_CORE_NBIT_IN__COUNT
+} vservice_core_nbit_in_t;
+
+typedef enum {
+	VSERVICE_CORE_NBIT_OUT__COUNT
+} vservice_core_nbit_out_t;
+
+/* Notification mask macros */
+#endif				/* ! __VSERVICES_CORE_PROTOCOL_H__ */
diff --git a/include/vservices/protocol/core/server.h b/include/vservices/protocol/core/server.h
new file mode 100644
index 0000000..959b8c3
--- /dev/null
+++ b/include/vservices/protocol/core/server.h
@@ -0,0 +1,171 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(VSERVICES_SERVER_CORE)
+#define VSERVICES_SERVER_CORE
+
+struct vs_service_device;
+struct vs_server_core_state;
+
+struct vs_server_core {
+
+	/*
+	 * If set to false then the receive message handlers are run from
+	 * workqueue context and are allowed to sleep. If set to true the
+	 * message handlers are run from tasklet context and may not sleep.
+	 */
+	bool rx_atomic;
+
+	/*
+	 * If this is set to true along with rx_atomic, the driver is allowed
+	 * to send messages from softirq contexts other than the receive
+	 * message handlers, after calling vs_service_state_lock_bh. Otherwise,
+	 * messages may only be sent from the receive message handlers, or
+	 * from task context after calling vs_service_state_lock. This must
+	 * not be set to true if rx_atomic is set to false.
+	 */
+	bool tx_atomic;
+
+	/*
+	 * These are the driver's recommended message quotas. They are used
+	 * by the core service to select message quotas for services with no
+	 * explicitly configured quotas.
+	 */
+	u32 in_quota_best;
+	u32 out_quota_best;
+    /** session setup **/
+	struct vs_server_core_state *(*alloc) (struct vs_service_device *
+					       service);
+	void (*release) (struct vs_server_core_state * _state);
+
+	struct vs_service_driver *driver;
+
+	/** Core service base interface **/
+	void (*start) (struct vs_server_core_state * _state);
+	void (*reset) (struct vs_server_core_state * _state);
+    /** Send/receive state callbacks **/
+	int (*tx_ready) (struct vs_server_core_state * _state);
+
+	struct {
+		int (*state_change) (struct vs_server_core_state * _state,
+				     vservice_core_statenum_t old,
+				     vservice_core_statenum_t new);
+
+		int (*req_connect) (struct vs_server_core_state * _state);
+
+		int (*req_disconnect) (struct vs_server_core_state * _state);
+
+		int (*msg_service_reset) (struct vs_server_core_state * _state,
+					  uint32_t service_id);
+
+	} core;
+};
+
+struct vs_server_core_state {
+	vservice_core_protocol_state_t state;
+	struct vs_service_device *service;
+	bool released;
+};
+
+/** Complete calls for server core functions **/
+
+    /** interface core **/
+/* command sync connect */
+extern int vs_server_core_core_send_ack_connect(struct vs_server_core_state
+						*_state, gfp_t flags);
+extern int vs_server_core_core_send_nack_connect(struct vs_server_core_state
+						 *_state, gfp_t flags);
+    /* command sync disconnect */
+extern int vs_server_core_core_send_ack_disconnect(struct vs_server_core_state
+						   *_state, gfp_t flags);
+extern int vs_server_core_core_send_nack_disconnect(struct vs_server_core_state
+						    *_state, gfp_t flags);
+    /* message startup */
+extern int vs_server_core_core_send_startup(struct vs_server_core_state *_state,
+					    uint32_t core_in_quota,
+					    uint32_t core_out_quota,
+					    gfp_t flags);
+
+	    /* message shutdown */
+extern int vs_server_core_core_send_shutdown(struct vs_server_core_state
+					     *_state, gfp_t flags);
+
+	    /* message service_created */
+extern struct vs_mbuf *vs_server_core_core_alloc_service_created(struct
+								 vs_server_core_state
+								 *_state,
+								 struct
+								 vs_string
+								 *service_name,
+								 struct
+								 vs_string
+								 *protocol_name,
+								 gfp_t flags);
+extern int vs_server_core_core_free_service_created(struct vs_server_core_state
+						    *_state,
+						    struct vs_string
+						    *service_name,
+						    struct vs_string
+						    *protocol_name,
+						    struct vs_mbuf *_mbuf);
+extern int vs_server_core_core_send_service_created(struct vs_server_core_state
+						    *_state,
+						    uint32_t service_id,
+						    struct vs_string
+						    service_name,
+						    struct vs_string
+						    protocol_name,
+						    struct vs_mbuf *_mbuf);
+
+	    /* message service_removed */
+extern int vs_server_core_core_send_service_removed(struct vs_server_core_state
+						    *_state,
+						    uint32_t service_id,
+						    gfp_t flags);
+
+	    /* message server_ready */
+extern int vs_server_core_core_send_server_ready(struct vs_server_core_state
+						 *_state, uint32_t service_id,
+						 uint32_t in_quota,
+						 uint32_t out_quota,
+						 uint32_t in_bit_offset,
+						 uint32_t in_num_bits,
+						 uint32_t out_bit_offset,
+						 uint32_t out_num_bits,
+						 gfp_t flags);
+
+	    /* message service_reset */
+extern int vs_server_core_core_send_service_reset(struct vs_server_core_state
+						  *_state, uint32_t service_id,
+						  gfp_t flags);
+
+/** Module registration **/
+
+struct module;
+
+extern int __vservice_core_server_register(struct vs_server_core *server,
+					   const char *name,
+					   struct module *owner);
+
+static inline int vservice_core_server_register(struct vs_server_core *server,
+						const char *name)
+{
+#ifdef MODULE
+	extern struct module __this_module;
+	struct module *this_module = &__this_module;
+#else
+	struct module *this_module = NULL;
+#endif
+
+	return __vservice_core_server_register(server, name, this_module);
+}
+
+extern int vservice_core_server_unregister(struct vs_server_core *server);
+#endif				/* ! VSERVICES_SERVER_CORE */
diff --git a/include/vservices/protocol/core/types.h b/include/vservices/protocol/core/types.h
new file mode 100644
index 0000000..2d6928d
--- /dev/null
+++ b/include/vservices/protocol/core/types.h
@@ -0,0 +1,87 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(VSERVICES_CORE_TYPES_H)
+#define VSERVICES_CORE_TYPES_H
+
+#define VSERVICE_CORE_SERVICE_NAME_SIZE (uint32_t)16
+
+#define VSERVICE_CORE_PROTOCOL_NAME_SIZE (uint32_t)32
+
+typedef enum {
+/* state offline */
+	VSERVICE_CORE_STATE_OFFLINE = 0,
+	VSERVICE_CORE_STATE_OFFLINE__CONNECT,
+	VSERVICE_CORE_STATE_OFFLINE__DISCONNECT,
+
+/* state disconnected */
+	VSERVICE_CORE_STATE_DISCONNECTED,
+	VSERVICE_CORE_STATE_DISCONNECTED__CONNECT,
+	VSERVICE_CORE_STATE_DISCONNECTED__DISCONNECT,
+
+/* state connected */
+	VSERVICE_CORE_STATE_CONNECTED,
+	VSERVICE_CORE_STATE_CONNECTED__CONNECT,
+	VSERVICE_CORE_STATE_CONNECTED__DISCONNECT,
+
+	VSERVICE_CORE__RESET = VSERVICE_CORE_STATE_OFFLINE
+} vservice_core_statenum_t;
+
+typedef struct {
+	vservice_core_statenum_t statenum;
+} vservice_core_state_t;
+
+#define VSERVICE_CORE_RESET_STATE (vservice_core_state_t) { \
+.statenum = VSERVICE_CORE__RESET}
+
+#define VSERVICE_CORE_STATE_IS_OFFLINE(state) (\
+((state).statenum == VSERVICE_CORE_STATE_OFFLINE) || \
+((state).statenum == VSERVICE_CORE_STATE_OFFLINE__CONNECT) || \
+((state).statenum == VSERVICE_CORE_STATE_OFFLINE__DISCONNECT))
+
+#define VSERVICE_CORE_STATE_IS_DISCONNECTED(state) (\
+((state).statenum == VSERVICE_CORE_STATE_DISCONNECTED) || \
+((state).statenum == VSERVICE_CORE_STATE_DISCONNECTED__CONNECT) || \
+((state).statenum == VSERVICE_CORE_STATE_DISCONNECTED__DISCONNECT))
+
+#define VSERVICE_CORE_STATE_IS_CONNECTED(state) (\
+((state).statenum == VSERVICE_CORE_STATE_CONNECTED) || \
+((state).statenum == VSERVICE_CORE_STATE_CONNECTED__CONNECT) || \
+((state).statenum == VSERVICE_CORE_STATE_CONNECTED__DISCONNECT))
+
+#define VSERVICE_CORE_STATE_VALID(state) ( \
+VSERVICE_CORE_STATE_IS_OFFLINE(state) ? true : \
+VSERVICE_CORE_STATE_IS_DISCONNECTED(state) ? true : \
+VSERVICE_CORE_STATE_IS_CONNECTED(state) ? true : \
+false)
+
+static inline const char *vservice_core_get_state_string(vservice_core_state_t
+							 state)
+{
+	static const char *names[] =
+	    { "offline", "offline__connect", "offline__disconnect",
+		"disconnected", "disconnected__connect",
+		    "disconnected__disconnect",
+		"connected", "connected__connect", "connected__disconnect"
+	};
+	if (!VSERVICE_CORE_STATE_VALID(state)) {
+		return "INVALID";
+	}
+	return names[state.statenum];
+}
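
As an illustrative sketch (not part of this patch), the state-name helper above is mainly useful for diagnostics. Assuming `_state` is a `struct vs_client_core_state *` as declared in core/client.h, a driver could log the current state roughly like this:

	/* Illustrative only: print the generated core protocol state name. */
	dev_dbg(&_state->service->dev, "core state: %s\n",
		vservice_core_get_state_string(_state->state.core));
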
+
+typedef struct {
+
+	vservice_core_state_t core;
+} vservice_core_protocol_state_t;
+
+#define VSERVICE_CORE_PROTOCOL_RESET_STATE (vservice_core_protocol_state_t) {\
+.core = VSERVICE_CORE_RESET_STATE }
+#endif				/* ! VSERVICES_CORE_TYPES_H */
diff --git a/include/vservices/protocol/serial/Kbuild b/include/vservices/protocol/serial/Kbuild
new file mode 100644
index 0000000..ec3cbe8
--- /dev/null
+++ b/include/vservices/protocol/serial/Kbuild
@@ -0,0 +1 @@
+header-y += types.h
diff --git a/include/vservices/protocol/serial/client.h b/include/vservices/protocol/serial/client.h
new file mode 100644
index 0000000..78efed2e
--- /dev/null
+++ b/include/vservices/protocol/serial/client.h
@@ -0,0 +1,114 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(__VSERVICES_CLIENT_SERIAL__)
+#define __VSERVICES_CLIENT_SERIAL__
+
+struct vs_service_device;
+struct vs_client_serial_state;
+
+struct vs_client_serial {
+
+	/*
+	 * If set to false then the receive message handlers are run from
+	 * workqueue context and are allowed to sleep. If set to true the
+	 * message handlers are run from tasklet context and may not sleep.
+	 */
+	bool rx_atomic;
+
+	/*
+	 * If this is set to true along with rx_atomic, the driver is allowed
+	 * to send messages from softirq contexts other than the receive
+	 * message handlers, after calling vs_service_state_lock_bh. Otherwise,
+	 * messages may only be sent from the receive message handlers, or
+	 * from task context after calling vs_service_state_lock. This must
+	 * not be set to true if rx_atomic is set to false.
+	 */
+	bool tx_atomic;
+    /** session setup **/
+	struct vs_client_serial_state *(*alloc) (struct vs_service_device *
+						 service);
+	void (*release) (struct vs_client_serial_state * _state);
+
+	struct vs_service_driver *driver;
+
+/** Opened, reopened and closed functions **/
+
+	void (*opened) (struct vs_client_serial_state * _state);
+
+	void (*reopened) (struct vs_client_serial_state * _state);
+
+	void (*closed) (struct vs_client_serial_state * _state);
+
+/** Send/receive state callbacks **/
+	int (*tx_ready) (struct vs_client_serial_state * _state);
+
+	struct {
+		int (*msg_msg) (struct vs_client_serial_state * _state,
+				struct vs_pbuf b, struct vs_mbuf * _mbuf);
+
+	} serial;
+};
+
+struct vs_client_serial_state {
+	vservice_serial_protocol_state_t state;
+	uint32_t packet_size;
+	struct {
+		uint32_t packet_size;
+	} serial;
+	struct vs_service_device *service;
+	bool released;
+};
+
+extern int vs_client_serial_reopen(struct vs_client_serial_state *_state);
+
+extern int vs_client_serial_close(struct vs_client_serial_state *_state);
+
+    /** interface serial **/
+/* message msg */
+extern struct vs_mbuf *vs_client_serial_serial_alloc_msg(struct
+							 vs_client_serial_state
+							 *_state,
+							 struct vs_pbuf *b,
+							 gfp_t flags);
+extern int vs_client_serial_serial_getbufs_msg(struct vs_client_serial_state
+					       *_state, struct vs_pbuf *b,
+					       struct vs_mbuf *_mbuf);
+extern int vs_client_serial_serial_free_msg(struct vs_client_serial_state
+					    *_state, struct vs_pbuf *b,
+					    struct vs_mbuf *_mbuf);
+extern int vs_client_serial_serial_send_msg(struct vs_client_serial_state
+					    *_state, struct vs_pbuf b,
+					    struct vs_mbuf *_mbuf);
+
+/** Module registration **/
+
+struct module;
+
+extern int __vservice_serial_client_register(struct vs_client_serial *client,
+					     const char *name,
+					     struct module *owner);
+
+static inline int vservice_serial_client_register(struct vs_client_serial
+						  *client, const char *name)
+{
+#ifdef MODULE
+	extern struct module __this_module;
+	struct module *this_module = &__this_module;
+#else
+	struct module *this_module = NULL;
+#endif
+
+	return __vservice_serial_client_register(client, name, this_module);
+}
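
As an illustrative sketch (not part of this patch), a minimal serial client driver would fill in a struct vs_client_serial and register it at module init; the alloc/release and message handlers are elided here and all names are hypothetical:

static struct vs_client_serial my_serial_client = {
	.rx_atomic = false,
	/* .alloc, .release, .opened, .closed, .serial.msg_msg, ... */
};

static int __init my_serial_client_init(void)
{
	return vservice_serial_client_register(&my_serial_client,
			"my_serial_client");
}

static void __exit my_serial_client_exit(void)
{
	vservice_serial_client_unregister(&my_serial_client);
}

module_init(my_serial_client_init);
module_exit(my_serial_client_exit);
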
+
+extern int vservice_serial_client_unregister(struct vs_client_serial *client);
+
+#endif				/* ! __VSERVICES_CLIENT_SERIAL__ */
diff --git a/include/vservices/protocol/serial/common.h b/include/vservices/protocol/serial/common.h
new file mode 100644
index 0000000..a530645
--- /dev/null
+++ b/include/vservices/protocol/serial/common.h
@@ -0,0 +1,37 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(__VSERVICES_SERIAL_PROTOCOL_H__)
+#define __VSERVICES_SERIAL_PROTOCOL_H__
+
+#define VSERVICE_SERIAL_PROTOCOL_NAME "com.ok-labs.serial"
+typedef enum {
+	VSERVICE_SERIAL_BASE_REQ_OPEN,
+	VSERVICE_SERIAL_BASE_ACK_OPEN,
+	VSERVICE_SERIAL_BASE_NACK_OPEN,
+	VSERVICE_SERIAL_BASE_REQ_CLOSE,
+	VSERVICE_SERIAL_BASE_ACK_CLOSE,
+	VSERVICE_SERIAL_BASE_NACK_CLOSE,
+	VSERVICE_SERIAL_BASE_REQ_REOPEN,
+	VSERVICE_SERIAL_BASE_ACK_REOPEN,
+	VSERVICE_SERIAL_BASE_NACK_REOPEN,
+	VSERVICE_SERIAL_BASE_MSG_RESET,
+	VSERVICE_SERIAL_SERIAL_MSG_MSG,
+} vservice_serial_message_id_t;
+typedef enum {
+	VSERVICE_SERIAL_NBIT_IN__COUNT
+} vservice_serial_nbit_in_t;
+
+typedef enum {
+	VSERVICE_SERIAL_NBIT_OUT__COUNT
+} vservice_serial_nbit_out_t;
+
+/* Notification mask macros */
+#endif				/* ! __VSERVICES_SERIAL_PROTOCOL_H__ */
diff --git a/include/vservices/protocol/serial/server.h b/include/vservices/protocol/serial/server.h
new file mode 100644
index 0000000..001fed5
--- /dev/null
+++ b/include/vservices/protocol/serial/server.h
@@ -0,0 +1,134 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(VSERVICES_SERVER_SERIAL)
+#define VSERVICES_SERVER_SERIAL
+
+struct vs_service_device;
+struct vs_server_serial_state;
+
+struct vs_server_serial {
+
+	/*
+	 * If set to false then the receive message handlers are run from
+	 * workqueue context and are allowed to sleep. If set to true the
+	 * message handlers are run from tasklet context and may not sleep.
+	 */
+	bool rx_atomic;
+
+	/*
+	 * If this is set to true along with rx_atomic, the driver is allowed
+	 * to send messages from softirq contexts other than the receive
+	 * message handlers, after calling vs_service_state_lock_bh. Otherwise,
+	 * messages may only be sent from the receive message handlers, or
+	 * from task context after calling vs_service_state_lock. This must
+	 * not be set to true if rx_atomic is set to false.
+	 */
+	bool tx_atomic;
+
+	/*
+	 * These are the driver's recommended message quotas. They are used
+	 * by the core service to select message quotas for services with no
+	 * explicitly configured quotas.
+	 */
+	u32 in_quota_best;
+	u32 out_quota_best;
+    /** session setup **/
+	struct vs_server_serial_state *(*alloc) (struct vs_service_device *
+						 service);
+	void (*release) (struct vs_server_serial_state * _state);
+
+	struct vs_service_driver *driver;
+
+/** Open, reopen, close and closed functions **/
+
+	 vs_server_response_type_t(*open) (struct vs_server_serial_state *
+					   _state);
+
+	 vs_server_response_type_t(*reopen) (struct vs_server_serial_state *
+					     _state);
+
+	 vs_server_response_type_t(*close) (struct vs_server_serial_state *
+					    _state);
+
+	void (*closed) (struct vs_server_serial_state * _state);
+
+/** Send/receive state callbacks **/
+	int (*tx_ready) (struct vs_server_serial_state * _state);
+
+	struct {
+		int (*msg_msg) (struct vs_server_serial_state * _state,
+				struct vs_pbuf b, struct vs_mbuf * _mbuf);
+
+	} serial;
+};
+
+struct vs_server_serial_state {
+	vservice_serial_protocol_state_t state;
+	uint32_t packet_size;
+	struct {
+		uint32_t packet_size;
+	} serial;
+	struct vs_service_device *service;
+	bool released;
+};
+
+/** Complete calls for server core functions **/
+extern int vs_server_serial_open_complete(struct vs_server_serial_state *_state,
+					  vs_server_response_type_t resp);
+
+extern int vs_server_serial_close_complete(struct vs_server_serial_state
+					   *_state,
+					   vs_server_response_type_t resp);
+
+extern int vs_server_serial_reopen_complete(struct vs_server_serial_state
+					    *_state,
+					    vs_server_response_type_t resp);
+
+    /** interface serial **/
+/* message msg */
+extern struct vs_mbuf *vs_server_serial_serial_alloc_msg(struct
+							 vs_server_serial_state
+							 *_state,
+							 struct vs_pbuf *b,
+							 gfp_t flags);
+extern int vs_server_serial_serial_getbufs_msg(struct vs_server_serial_state
+					       *_state, struct vs_pbuf *b,
+					       struct vs_mbuf *_mbuf);
+extern int vs_server_serial_serial_free_msg(struct vs_server_serial_state
+					    *_state, struct vs_pbuf *b,
+					    struct vs_mbuf *_mbuf);
+extern int vs_server_serial_serial_send_msg(struct vs_server_serial_state
+					    *_state, struct vs_pbuf b,
+					    struct vs_mbuf *_mbuf);
+
+/** Module registration **/
+
+struct module;
+
+extern int __vservice_serial_server_register(struct vs_server_serial *server,
+					     const char *name,
+					     struct module *owner);
+
+static inline int vservice_serial_server_register(struct vs_server_serial
+						  *server, const char *name)
+{
+#ifdef MODULE
+	extern struct module __this_module;
+	struct module *this_module = &__this_module;
+#else
+	struct module *this_module = NULL;
+#endif
+
+	return __vservice_serial_server_register(server, name, this_module);
+}
+
+extern int vservice_serial_server_unregister(struct vs_server_serial *server);
+#endif				/* ! VSERVICES_SERVER_SERIAL */
diff --git a/include/vservices/protocol/serial/types.h b/include/vservices/protocol/serial/types.h
new file mode 100644
index 0000000..46edf95
--- /dev/null
+++ b/include/vservices/protocol/serial/types.h
@@ -0,0 +1,88 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(VSERVICES_SERIAL_TYPES_H)
+#define VSERVICES_SERIAL_TYPES_H
+
+typedef enum {
+/* state closed */
+	VSERVICE_BASE_STATE_CLOSED = 0,
+	VSERVICE_BASE_STATE_CLOSED__OPEN,
+	VSERVICE_BASE_STATE_CLOSED__CLOSE,
+	VSERVICE_BASE_STATE_CLOSED__REOPEN,
+
+/* state running */
+	VSERVICE_BASE_STATE_RUNNING,
+	VSERVICE_BASE_STATE_RUNNING__OPEN,
+	VSERVICE_BASE_STATE_RUNNING__CLOSE,
+	VSERVICE_BASE_STATE_RUNNING__REOPEN,
+
+	VSERVICE_BASE__RESET = VSERVICE_BASE_STATE_CLOSED
+} vservice_base_statenum_t;
+
+typedef struct {
+	vservice_base_statenum_t statenum;
+} vservice_base_state_t;
+
+#define VSERVICE_BASE_RESET_STATE (vservice_base_state_t) { \
+.statenum = VSERVICE_BASE__RESET}
+
+#define VSERVICE_BASE_STATE_IS_CLOSED(state) (\
+((state).statenum == VSERVICE_BASE_STATE_CLOSED) || \
+((state).statenum == VSERVICE_BASE_STATE_CLOSED__OPEN) || \
+((state).statenum == VSERVICE_BASE_STATE_CLOSED__CLOSE) || \
+((state).statenum == VSERVICE_BASE_STATE_CLOSED__REOPEN))
+
+#define VSERVICE_BASE_STATE_IS_RUNNING(state) (\
+((state).statenum == VSERVICE_BASE_STATE_RUNNING) || \
+((state).statenum == VSERVICE_BASE_STATE_RUNNING__OPEN) || \
+((state).statenum == VSERVICE_BASE_STATE_RUNNING__CLOSE) || \
+((state).statenum == VSERVICE_BASE_STATE_RUNNING__REOPEN))
+
+#define VSERVICE_BASE_STATE_VALID(state) ( \
+VSERVICE_BASE_STATE_IS_CLOSED(state) ? true : \
+VSERVICE_BASE_STATE_IS_RUNNING(state) ? true : \
+false)
+
+static inline const char *vservice_base_get_state_string(vservice_base_state_t
+							 state)
+{
+	static const char *names[] =
+	    { "closed", "closed__open", "closed__close", "closed__reopen",
+		"running", "running__open", "running__close", "running__reopen"
+	};
+	if (!VSERVICE_BASE_STATE_VALID(state)) {
+		return "INVALID";
+	}
+	return names[state.statenum];
+}
+
+typedef struct {
+} vservice_serial_state_t;
+
+#define VSERVICE_SERIAL_RESET_STATE (vservice_serial_state_t) { \
+}
+
+#define VSERVICE_SERIAL_STATE_VALID(state) true
+
+typedef struct {
+
+	vservice_base_state_t base;
+
+	vservice_serial_state_t serial;
+} vservice_serial_protocol_state_t;
+
+#define VSERVICE_SERIAL_PROTOCOL_RESET_STATE (vservice_serial_protocol_state_t) {\
+.base = VSERVICE_BASE_RESET_STATE,\
+.serial = VSERVICE_SERIAL_RESET_STATE }
+
+#define VSERVICE_SERIAL_IS_STATE_RESET(state) \
+            ((state).base.statenum == VSERVICE_BASE__RESET)
+#endif				/* ! VSERVICES_SERIAL_TYPES_H */
diff --git a/include/vservices/service.h b/include/vservices/service.h
new file mode 100644
index 0000000..af232b6
--- /dev/null
+++ b/include/vservices/service.h
@@ -0,0 +1,674 @@
+/*
+ * include/vservices/service.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file defines the driver and device types for vServices client and
+ * server drivers. These are generally defined by generated protocol-layer
+ * code. However, they can also be defined directly by applications that
+ * don't require protocol generation.
+ */
+
+#ifndef _VSERVICE_SERVICE_H_
+#define _VSERVICE_SERVICE_H_
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38)
+#include <asm/atomic.h>
+#else
+#include <linux/atomic.h>
+#endif
+
+#include <vservices/transport.h>
+#include <vservices/session.h>
+#include <vservices/types.h>
+
+struct vs_mbuf;
+
+/**
+ * struct vs_service_driver - Virtual service driver structure
+ * @protocol: Protocol name for this driver
+ * @is_server: True if this is a server driver, false if it is a client driver
+ * @rx_atomic: If set to false then the receive message handlers are run from
+ *	     workqueue context and are allowed to sleep. If set to true
+ *	     the message handlers are run from tasklet context and may not
+ *	     sleep. For this purpose, tx_ready is considered a receive
+ *	     message handler.
+ * @tx_atomic: If this is set to true along with rx_atomic, the driver is
+ *	allowed to send messages from softirq contexts other than the receive
+ *	message handlers, after calling vs_service_state_lock_bh. Otherwise,
+ *	messages may only be sent from the receive message handlers, or from
+ *	task context after calling vs_service_state_lock.
+ * @probe: Probe function for this service
+ * @remove: Remove function for this service
+ * --- Callbacks ---
+ * @receive: Message handler function for this service
+ * @notify: Incoming notification handler function for this service
+ * @start: Callback which is run when this service is started
+ * @reset: Callback which is run when this service is reset
+ * @tx_ready: Callback which is run when the service has dropped below its
+ *	    send quota
+ * --- Resource requirements (valid for server only) ---
+ * @in_quota_min: minimum number of input messages for protocol functionality
+ * @in_quota_best: suggested number of input messages
+ * @out_quota_min: minimum number of output messages for protocol functionality
+ * @out_quota_best: suggested number of output messages
+ * @in_notify_count: number of input notification bits used
+ * @out_notify_count: number of output notification bits used
+ * --- Internal ---
+ * @driver: Linux device model driver structure
+ *
+ * The callback functions for a virtual service driver are all called from
+ * the virtual service device's work queue.
+ */
+struct vs_service_driver {
+	const char *protocol;
+	bool is_server;
+	bool rx_atomic, tx_atomic;
+
+	int (*probe)(struct vs_service_device *service);
+	int (*remove)(struct vs_service_device *service);
+
+	int (*receive)(struct vs_service_device *service,
+		struct vs_mbuf *mbuf);
+	void (*notify)(struct vs_service_device *service, u32 flags);
+
+	void (*start)(struct vs_service_device *service);
+	void (*reset)(struct vs_service_device *service);
+
+	int (*tx_ready)(struct vs_service_device *service);
+
+	unsigned in_quota_min;
+	unsigned in_quota_best;
+	unsigned out_quota_min;
+	unsigned out_quota_best;
+	unsigned in_notify_count;
+	unsigned out_notify_count;
+
+	struct device_driver driver;
+};
+
+#define to_vs_service_driver(d) \
+	container_of(d, struct vs_service_driver, driver)
+
+/* The vServices server/client bus types */
+extern struct bus_type vs_client_bus_type;
+extern struct bus_type vs_server_bus_type;
+
+/**
+ * struct vs_service_stats - Virtual service statistics
+ * @over_quota_time: Internal counter for tracking over quota time.
+ * @sent_mbufs: Total number of message buffers sent.
+ * @sent_bytes: Total bytes sent.
+ * @send_failures: Total number of send failures.
+ * @recv_mbufs: Total number of message buffers received.
+ * @recv_bytes: Total number of bytes received.
+ * @recv_failures: Total number of receive failures.
+ * @nr_over_quota: Number of times an mbuf allocation has failed because the
+ *                 service is over quota.
+ * @nr_tx_ready: Number of times the service has run its tx_ready handler.
+ * @over_quota_time_total: The total amount of time in milliseconds that the
+ *                         service has spent over quota. Measured as the time
+ *                         between exceeding quota in mbuf allocation and
+ *                         running the tx_ready handler.
+ * @over_quota_time_avg: The average amount of time in milliseconds that the
+ *                       service is spending in the over quota state.
+ */
+struct vs_service_stats {
+	unsigned long	over_quota_time;
+
+	atomic_t        sent_mbufs;
+	atomic_t        sent_bytes;
+	atomic_t	send_failures;
+	atomic_t        recv_mbufs;
+	atomic_t        recv_bytes;
+	atomic_t	recv_failures;
+	atomic_t        nr_over_quota;
+	atomic_t        nr_tx_ready;
+	atomic_t        over_quota_time_total;
+	atomic_t        over_quota_time_avg;
+};
+
+/**
+ * struct vs_service_device - Virtual service device
+ * @id: Unique ID (to the session) for this service
+ * @name: Service name
+ * @sysfs_name: The sysfs name for the service
+ * @protocol: Service protocol name
+ * @is_server: True if this device is a server, false if it is a client
+ * @owner: service responsible for managing this service. This must be
+ *     on the same session, and is NULL iff this is the core service.
+ *     It must not be a service whose driver has tx_atomic set.
+ * @lock_subclass: the number of generations of owners between this service
+ *     and the core service; 0 for the core service, 1 for anything directly
+ *     created by it, and so on. This is only used for verifying lock
+ *     ordering (when lockdep is enabled), hence the name.
+ * @ready_lock: mutex protecting readiness, disable_count and driver_probed.
+ *     This depends on the state_mutex of the service's owner, if any. Acquire
+ *     it using mutex_lock_nested(ready_lock, lock_subclass).
+ * @readiness: Service's readiness state, owned by session layer.
+ * @disable_count: Number of times the service has been disabled without
+ *     a matching enable.
+ * @driver_probed: True if a driver has been probed (and not removed)
+ * @work_queue: Work queue for this service's task-context work.
+ * @rx_tasklet: Tasklet for handling incoming messages. This is only used
+ *     if the service driver has rx_atomic set to true. Otherwise
+ *     incoming messages are handled on the workqueue by rx_work.
+ * @rx_work: Work structure for handling incoming messages. This is only
+ *     used if the service driver has rx_atomic set to false.
+ * @rx_lock: Spinlock which protects access to rx_queue and tx_ready
+ * @rx_queue: Queue of incoming messages
+ * @tx_ready: Flag indicating that a tx_ready event is pending
+ * @tx_batching: Flag indicating that outgoing messages are being batched
+ * @state_spinlock: spinlock used to protect the service state if the
+ *     service driver has tx_atomic (and rx_atomic) set to true. This
+ *     depends on the service's ready_lock. Acquire it only by
+ *     calling vs_service_state_lock_bh().
+ * @state_mutex: mutex used to protect the service state if the service
+ *     driver has tx_atomic set to false. This depends on the service's
+ *     ready_lock, and if rx_atomic is true, the rx_tasklet must be
+ *     disabled while it is held. Acquire it only by calling
+ *     vs_service_state_lock().
+ * @state_spinlock_used: Flag to check if the state spinlock has been acquired.
+ * @state_mutex_used: Flag to check if the state mutex has been acquired.
+ * @reset_work: Work to reset the service after a driver fails
+ * @pending_reset: Set if reset_work has been queued and not completed.
+ * @ready_work: Work to make service ready after a throttling delay
+ * @cooloff_work: Work for cooling off reset throttling after the reset
+ *     throttling limit was hit
+ * @cleanup_work: Work for cleaning up and freeing the service structure
+ * @last_reset: Time in jiffies at which this service last reset
+ * @last_reset_request: Time in jiffies the last reset request for this
+ *     service occurred at
+ * @last_ready: Time in jiffies at which this service last became ready
+ * @reset_delay: Time in jiffies that the next throttled reset will be
+ *     delayed for. A value of zero means that reset throttling is not in
+ *     effect.
+ * @is_over_quota: Internal flag for whether the service is over quota. This
+ *                 flag is only used for stats accounting.
+ * @quota_wq: waitqueue that is woken whenever the available send quota
+ *            increases.
+ * @notify_send_bits: The number of bits allocated for outgoing notifications.
+ * @notify_send_offset: The first bit allocated for outgoing notifications.
+ * @notify_recv_bits: The number of bits allocated for incoming notifications.
+ * @notify_recv_offset: The first bit allocated for incoming notifications.
+ * @send_quota: The maximum number of outgoing messages.
+ * @recv_quota: The maximum number of incoming messages.
+ * @in_quota_set: For servers, the number of client->server messages
+ *     requested during system configuration (sysfs or environment).
+ * @out_quota_set: For servers, the number of server->client messages
+ *     requested during system configuration (sysfs or environment).
+ * @dev: Linux device model device structure
+ * @stats: Service statistics
+ */
+struct vs_service_device {
+	vs_service_id_t id;
+	char *name;
+	char *sysfs_name;
+	char *protocol;
+	bool is_server;
+
+	struct vs_service_device *owner;
+	unsigned lock_subclass;
+
+	struct mutex ready_lock;
+	unsigned readiness;
+	int disable_count;
+	bool driver_probed;
+
+	struct workqueue_struct *work_queue;
+
+	struct tasklet_struct rx_tasklet;
+	struct work_struct rx_work;
+
+	spinlock_t rx_lock;
+	struct list_head rx_queue;
+	bool tx_ready, tx_batching;
+
+	spinlock_t state_spinlock;
+	struct mutex state_mutex;
+
+	struct work_struct reset_work;
+	bool pending_reset;
+	struct delayed_work ready_work;
+	struct delayed_work cooloff_work;
+	struct work_struct cleanup_work;
+
+	unsigned long last_reset;
+	unsigned long last_reset_request;
+	unsigned long last_ready;
+	unsigned long reset_delay;
+
+	atomic_t is_over_quota;
+	wait_queue_head_t quota_wq;
+
+	unsigned notify_send_bits;
+	unsigned notify_send_offset;
+	unsigned notify_recv_bits;
+	unsigned notify_recv_offset;
+	unsigned send_quota;
+	unsigned recv_quota;
+
+	unsigned in_quota_set;
+	unsigned out_quota_set;
+
+	void *transport_priv;
+
+	struct device dev;
+	struct vs_service_stats stats;
+
+#ifdef CONFIG_VSERVICES_LOCK_DEBUG
+	bool state_spinlock_used;
+	bool state_mutex_used;
+#endif
+};
+
+#define to_vs_service_device(d) container_of(d, struct vs_service_device, dev)
+
+/**
+ * vs_service_get_session - Return the session for a service
+ * @service: Service to get the session for
+ */
+static inline struct vs_session_device *
+vs_service_get_session(struct vs_service_device *service)
+{
+	return to_vs_session_device(service->dev.parent);
+}
+
+/**
+ * vs_service_send - Send a message from a service
+ * @service: Service to send the message from
+ * @mbuf: Message buffer to send
+ */
+static inline int
+vs_service_send(struct vs_service_device *service, struct vs_mbuf *mbuf)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+	const struct vs_transport_vtable *vt = session->transport->vt;
+	const unsigned long flags =
+		service->tx_batching ?  VS_TRANSPORT_SEND_FLAGS_MORE : 0;
+	size_t msg_size = vt->mbuf_size(mbuf);
+	int err;
+
+	err = vt->send(session->transport, service, mbuf, flags);
+	if (!err) {
+		atomic_inc(&service->stats.sent_mbufs);
+		atomic_add(msg_size, &service->stats.sent_bytes);
+	} else {
+		atomic_inc(&service->stats.send_failures);
+	}
+
+	return err;
+}
+
+/**
+ * vs_service_alloc_mbuf - Allocate a message buffer for a service
+ * @service: Service to allocate the buffer for
+ * @size: Size of the data buffer to allocate
+ * @flags: Flags to pass to the buffer allocation
+ */
+static inline struct vs_mbuf *
+vs_service_alloc_mbuf(struct vs_service_device *service, size_t size,
+		gfp_t flags)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+	struct vs_mbuf *mbuf;
+
+	mbuf = session->transport->vt->alloc_mbuf(session->transport,
+			service, size, flags);
+	if (IS_ERR(mbuf) && PTR_ERR(mbuf) == -ENOBUFS) {
+		/* Over quota accounting */
+		if (atomic_cmpxchg(&service->is_over_quota, 0, 1) == 0) {
+			service->stats.over_quota_time = jiffies;
+			atomic_inc(&service->stats.nr_over_quota);
+		}
+	}
+
+	/*
+	 * The transport drivers should return either a valid message buffer
+	 * pointer or an ERR_PTR value. Warn here if a transport driver is
+	 * returning NULL on message buffer allocation failure.
+	 */
+	if (WARN_ON_ONCE(!mbuf))
+		return ERR_PTR(-ENOMEM);
+
+	return mbuf;
+}
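
A minimal sketch (not part of this patch) of the allocate-then-send pattern built from the helpers above. Filling the payload is omitted because the mbuf layout is transport-specific, and freeing the mbuf when the send fails is an assumption about the transport contract rather than something this header guarantees:

static int example_send(struct vs_service_device *service, size_t size)
{
	struct vs_mbuf *mbuf;
	int err;

	mbuf = vs_service_alloc_mbuf(service, size, GFP_KERNEL);
	if (IS_ERR(mbuf))
		return PTR_ERR(mbuf);	/* -ENOBUFS means over send quota */

	/* ... fill the payload (transport-specific) ... */

	err = vs_service_send(service, mbuf);
	if (err < 0)
		vs_service_free_mbuf(service, mbuf);	/* assumed caller-owned on failure */

	return err;
}
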
+
+/**
+ * vs_service_free_mbuf - Deallocate a message buffer for a service
+ * @service: Service the message buffer was allocated for
+ * @mbuf: Message buffer to deallocate
+ */
+static inline void
+vs_service_free_mbuf(struct vs_service_device *service, struct vs_mbuf *mbuf)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+
+	session->transport->vt->free_mbuf(session->transport, service, mbuf);
+}
+
+/**
+ * vs_service_notify - Send a notification from a service
+ * @service: Service to send the notification from
+ * @flags: Notification bits to send
+ */
+static inline int
+vs_service_notify(struct vs_service_device *service, u32 flags)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+
+	return session->transport->vt->notify(session->transport,
+			service, flags);
+}
+
+/**
+ * vs_service_has_atomic_rx - Return whether or not a service's receive
+ * message handler runs in atomic context. This function should only be
+ * called for services which are bound to a driver.
+ *
+ * @service: Service to check
+ */
+static inline bool
+vs_service_has_atomic_rx(struct vs_service_device *service)
+{
+	if (WARN_ON(!service->dev.driver))
+		return false;
+
+	return to_vs_service_driver(service->dev.driver)->rx_atomic;
+}
+
+/**
+ * vs_session_max_mbuf_size - Return the maximum allocation size of a message
+ * buffer.
+ * @service: The service to check
+ */
+static inline size_t
+vs_service_max_mbuf_size(struct vs_service_device *service)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+
+	return session->transport->vt->max_mbuf_size(session->transport);
+}
+
+/**
+ * vs_service_send_mbufs_available - Return the number of mbufs which can be
+ * allocated for sending before going over quota.
+ * @service: The service to check
+ */
+static inline ssize_t
+vs_service_send_mbufs_available(struct vs_service_device *service)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+
+	return session->transport->vt->service_send_avail(session->transport,
+			service);
+}
+
+/**
+ * vs_service_has_atomic_tx - Return whether or not a service is allowed to
+ * transmit from atomic context (other than its receive message handler).
+ * This function should only be called for services which are bound to a
+ * driver.
+ *
+ * @service: Service to check
+ */
+static inline bool
+vs_service_has_atomic_tx(struct vs_service_device *service)
+{
+	if (WARN_ON(!service->dev.driver))
+		return false;
+
+	return to_vs_service_driver(service->dev.driver)->tx_atomic;
+}
+
+/**
+ * vs_service_state_lock - Acquire a lock allowing service state operations
+ * from external task contexts.
+ *
+ * @service: Service to lock.
+ *
+ * This must be used to protect any service state accesses that occur in task
+ * contexts outside of a callback from the vservices protocol layer. It must
+ * not be called from a protocol layer callback, nor from atomic context.
+ *
+ * If this service's state is also accessed from softirq contexts other than
+ * vservices protocol layer callbacks, use vs_service_state_lock_bh instead,
+ * and set the driver's tx_atomic flag.
+ *
+ * If this is called from outside the service's workqueue, the calling driver
+ * must provide its own guarantee that it has not been detached from the
+ * service. If that is not possible, use vs_state_lock_safe().
+ */
+static inline void
+vs_service_state_lock(struct vs_service_device *service)
+__acquires(service)
+{
+#ifdef CONFIG_VSERVICES_LOCK_DEBUG
+	WARN_ON_ONCE(vs_service_has_atomic_tx(service));
+#endif
+
+	mutex_lock_nested(&service->state_mutex, service->lock_subclass);
+
+#ifdef CONFIG_VSERVICES_LOCK_DEBUG
+	if (WARN_ON_ONCE(service->state_spinlock_used))
+		dev_err(&service->dev, "Service is using both the state spinlock and mutex - Fix your driver\n");
+	service->state_mutex_used = true;
+#endif
+
+	if (vs_service_has_atomic_rx(service))
+		tasklet_disable(&service->rx_tasklet);
+
+	__acquire(service);
+}
+
+/**
+ * vs_service_state_unlock - Release the lock acquired by vs_service_state_lock.
+ *
+ * @service: Service to unlock.
+ */
+static inline void
+vs_service_state_unlock(struct vs_service_device *service)
+__releases(service)
+{
+	__release(service);
+
+	mutex_unlock(&service->state_mutex);
+
+	if (vs_service_has_atomic_rx(service)) {
+		tasklet_enable(&service->rx_tasklet);
+
+		/* Kick the tasklet if there is RX work to do */
+		if (!list_empty(&service->rx_queue))
+			tasklet_schedule(&service->rx_tasklet);
+	}
+}
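
As a sketch of the rule described above (illustrative, not part of this patch), a driver without tx_atomic that needs to touch protocol state from its own task context, outside any protocol-layer callback, brackets the access with the state lock:

static void example_task_context_update(struct vs_service_device *service)
{
	vs_service_state_lock(service);
	/* ... read or modify protocol state, queue outgoing messages ... */
	vs_service_state_unlock(service);
}
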
+
+/**
+ * vs_service_state_lock_bh - Acquire a lock allowing service state operations
+ * from external task or softirq contexts.
+ *
+ * @service: Service to lock.
+ *
+ * This is an alternative to vs_service_state_lock for drivers that receive
+ * messages in atomic context (i.e. have their rx_atomic flag set), *and* must
+ * transmit messages from softirq contexts other than their own message
+ * receive and tx_ready callbacks. Such drivers must set their tx_atomic
+ * flag, so generated protocol drivers perform correct locking.
+ *
+ * This should replace all calls to vs_service_state_lock for services that
+ * need it. Do not use both locking functions in one service driver.
+ *
+ * The calling driver must provide its own guarantee that it has not been
+ * detached from the service. If that is not possible, use
+ * vs_state_lock_safe_bh().
+ */
+static inline void
+vs_service_state_lock_bh(struct vs_service_device *service)
+__acquires(service)
+__acquires(&service->state_spinlock)
+{
+#ifdef CONFIG_VSERVICES_LOCK_DEBUG
+	WARN_ON_ONCE(!vs_service_has_atomic_rx(service));
+	WARN_ON_ONCE(!vs_service_has_atomic_tx(service));
+#endif
+
+#ifdef CONFIG_SMP
+	/* Not necessary on UP because it's implied by spin_lock_bh(). */
+	tasklet_disable(&service->rx_tasklet);
+#endif
+
+	spin_lock_bh(&service->state_spinlock);
+
+#ifdef CONFIG_VSERVICES_LOCK_DEBUG
+	if (WARN_ON_ONCE(service->state_mutex_used))
+		dev_err(&service->dev, "Service is using both the state spinlock and mutex - Fix your driver\n");
+	service->state_spinlock_used = true;
+#endif
+
+	__acquire(service);
+}
+
+/**
+ * vs_service_state_unlock_bh - Release the lock acquired by
+ * vs_service_state_lock_bh.
+ *
+ * @service: Service to unlock.
+ */
+static inline void
+vs_service_state_unlock_bh(struct vs_service_device *service)
+__releases(service)
+__releases(&service->state_spinlock)
+{
+	__release(service);
+
+	spin_unlock_bh(&service->state_spinlock);
+
+#ifdef CONFIG_SMP
+	tasklet_enable(&service->rx_tasklet);
+#endif
+}
+
+/* Convenience macros for locking a state structure rather than a service. */
+#define vs_state_lock(state) vs_service_state_lock((state)->service)
+#define vs_state_unlock(state) vs_service_state_unlock((state)->service)
+#define vs_state_lock_bh(state) vs_service_state_lock_bh((state)->service)
+#define vs_state_unlock_bh(state) vs_service_state_unlock_bh((state)->service)
+
+/**
+ * vs_state_lock_safe[_bh] - Acquire a lock for a state structure's service,
+ * when the service may have been detached from the state.
+ *
+ * This is useful for blocking operations that can't easily be terminated
+ * before returning from the service reset handler, such as file I/O. To use
+ * this, the state structure should be reference-counted rather than freed in
+ * the release callback, and the driver should retain its own reference to the
+ * service until the state structure is freed.
+ *
+ * This macro acquires the lock and returns true if the state has not been
+ * detached from the service. Otherwise, it returns false.
+ *
+ * Note that the _bh variant cannot be used from atomic context, because it
+ * acquires a mutex.
+ */
+#define __vs_state_lock_safe(_state, _lock, _unlock) ({ \
+	bool __ok = true;						\
+	typeof(_state) __state = (_state);				\
+	struct vs_service_device *__service = __state->service;		\
+	mutex_lock_nested(&__service->ready_lock,			\
+			__service->lock_subclass);			\
+	__ok = !ACCESS_ONCE(__state->released);				\
+	if (__ok) {							\
+		_lock(__state);						\
+		__ok = !ACCESS_ONCE(__state->released);			\
+		if (!__ok)						\
+			_unlock(__state);				\
+	}								\
+	mutex_unlock(&__service->ready_lock);				\
+	__ok;								\
+})
+#define vs_state_lock_safe(_state) \
+	__vs_state_lock_safe((_state), vs_state_lock, vs_state_unlock)
+#define vs_state_lock_safe_bh(_state) \
+	__vs_state_lock_safe((_state), vs_state_lock_bh, vs_state_unlock_bh)
+
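
Assuming a hypothetical, reference-counted driver state structure that provides the service and released fields these macros expect, a blocking worker might use the safe variant like this (illustrative only, not part of this patch):

static void example_blocking_worker(struct my_driver_state *state)
{
	if (!vs_state_lock_safe(state))
		return;	/* service already detached; drop the work */

	/* ... finish the deferred I/O against the protocol state ... */

	vs_state_unlock(state);
}
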
+/**
+ * vs_get_service - Get a reference to a service.
+ * @service: Service to get a reference to.
+ */
+static inline struct vs_service_device *
+vs_get_service(struct vs_service_device *service)
+{
+	if (service)
+		get_device(&service->dev);
+	return service;
+}
+
+/**
+ * vs_put_service - Put a reference to a service.
+ * @service: The service to put the reference to.
+ */
+static inline void
+vs_put_service(struct vs_service_device *service)
+{
+	put_device(&service->dev);
+}
+
+extern int vs_service_reset(struct vs_service_device *service,
+		struct vs_service_device *caller);
+extern void vs_service_reset_nosync(struct vs_service_device *service);
+
+/**
+ * vs_service_send_batch_start - Start a batch of outgoing messages
+ * @service: The service that is starting a batch
+ * @flush: Finish any previously started batch (if false, then duplicate
+ * calls to this function have no effect)
+ */
+static inline void
+vs_service_send_batch_start(struct vs_service_device *service, bool flush)
+{
+	if (flush && service->tx_batching) {
+		struct vs_session_device *session =
+			vs_service_get_session(service);
+		const struct vs_transport_vtable *vt = session->transport->vt;
+		if (vt->flush)
+			vt->flush(session->transport, service);
+	} else {
+		service->tx_batching = true;
+	}
+}
+
+/**
+ * vs_service_send_batch_end - End a batch of outgoing messages
+ * @service: The service that is ending a batch
+ * @flush: Start sending the batch immediately (if false, the batch will
+ * be flushed when the next message is sent)
+ */
+static inline void
+vs_service_send_batch_end(struct vs_service_device *service, bool flush)
+{
+	service->tx_batching = false;
+	if (flush) {
+		struct vs_session_device *session =
+			vs_service_get_session(service);
+		const struct vs_transport_vtable *vt = session->transport->vt;
+		if (vt->flush)
+			vt->flush(session->transport, service);
+	}
+}
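+
+/*
+ * A minimal batching sketch (vs_my_proto_send_item() stands in for a
+ * hypothetical protocol-generated send function; it is not part of this
+ * header):
+ *
+ *	vs_service_send_batch_start(service, false);
+ *	for (i = 0; i < count; i++)
+ *		vs_my_proto_send_item(state, items[i]);
+ *	vs_service_send_batch_end(service, true);
+ */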
+
+
+#endif /* _VSERVICE_SERVICE_H_ */
diff --git a/include/vservices/session.h b/include/vservices/session.h
new file mode 100644
index 0000000..b9dc775
--- /dev/null
+++ b/include/vservices/session.h
@@ -0,0 +1,161 @@
+/*
+ * include/vservices/session.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file defines the device type for a vServices session attached to a
+ * transport. This should only be used by transport drivers, the vServices
+ * session code, and the inline transport-access functions defined in
+ * vservices/service.h.
+ *
+ * Drivers for these devices are defined internally by the vServices
+ * framework. Other drivers should not attach to these devices.
+ */
+
+#ifndef _VSERVICES_SESSION_H_
+#define _VSERVICES_SESSION_H_
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/completion.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/idr.h>
+
+#include <vservices/types.h>
+
+struct vs_service_device;
+struct vs_mbuf;
+
+struct notifier_block;
+
+/**
+ * enum vs_notify_event_t - vService notifier events
+ *
+ * @VS_SESSION_NOTIFY_ADD: vService session added. Argument is a pointer to
+ * the vs_session_device. This notification is sent after the session has been
+ * added.
+ *
+ * @VS_SESSION_NOTIFY_REMOVE: vService session about to be removed. Argument is
+ * a pointer to the vs_session_device. This notification is sent before the
+ * session is removed.
+ */
+enum vs_notify_event_t {
+	VS_SESSION_NOTIFY_ADD,
+	VS_SESSION_NOTIFY_REMOVE,
+};
+
+/**
+ * struct vs_session_device - Session device
+ * @name: The unique human-readable name of this session.
+ * @is_server: True if this session is a server, false if client
+ * @transport: The transport device for this session
+ * @session_num: Unique ID for this session. Used for sysfs
+ * @session_lock: Mutex which protects any change to service presence or
+ *     readiness
+ * @core_service: The core service, if one has ever been registered. Once set,
+ *     this must remain valid and unchanged until the session driver is
+ *     removed. Writes are protected by the service_idr_lock.
+ * @service_idr: IDR of the services currently allocated on this session,
+ *     which allows service ids to be recycled. Protected by
+ *     service_idr_lock.
+ * @service_idr_lock: Mutex protecting service IDR updates
+ * @activation_work: work structure for handling session activation & reset
+ * @activation_state: true if transport is currently active
+ * @fatal_error_work: work structure for handling fatal session failures
+ * @debug_mask: Debug level mask
+ * @list: Entry in the global session list
+ * @sysfs_entry: Kobject pointer pointing to session device in sysfs under
+ *     sys/vservices
+ * @dev: Device structure for the Linux device model
+ */
+struct vs_session_device {
+	char *name;
+	bool is_server;
+	struct vs_transport *transport;
+	int session_num;
+
+	struct mutex session_lock;
+
+	/*
+	 * The service_idr maintains the list of currently allocated services
+	 * on a session, and allows for recycling of service ids. The lock also
+	 * protects core_service.
+	 */
+	struct idr service_idr;
+	struct mutex service_idr_lock;
+	struct vs_service_device *core_service;
+
+	struct work_struct activation_work;
+	atomic_t activation_state;
+
+	struct work_struct fatal_error_work;
+
+	unsigned long debug_mask;
+
+	struct list_head list;
+	struct kobject *sysfs_entry;
+
+	struct device dev;
+};
+
+#define to_vs_session_device(d) \
+	container_of(d, struct vs_session_device, dev)
+
+extern struct vs_session_device *
+vs_session_register(struct vs_transport *transport, struct device *parent,
+		bool server, const char *transport_name);
+extern void vs_session_start(struct vs_session_device *session);
+extern void vs_session_unregister(struct vs_session_device *session);
+
+extern int vs_session_handle_message(struct vs_session_device *session,
+		struct vs_mbuf *mbuf, vs_service_id_t service_id);
+
+extern void vs_session_quota_available(struct vs_session_device *session,
+		vs_service_id_t service_id, unsigned count,
+		bool send_tx_ready);
+
+extern void vs_session_handle_notify(struct vs_session_device *session,
+		unsigned long flags, vs_service_id_t service_id);
+
+extern void vs_session_handle_reset(struct vs_session_device *session);
+extern void vs_session_handle_activate(struct vs_session_device *session);
+
+extern struct vs_service_device *
+vs_server_create_service(struct vs_session_device *session,
+		struct vs_service_device *parent, const char *name,
+		const char *protocol, const void *plat_data);
+extern int vs_server_destroy_service(struct vs_service_device *service,
+		struct vs_service_device *parent);
+
+extern void vs_session_register_notify(struct notifier_block *nb);
+extern void vs_session_unregister_notify(struct notifier_block *nb);
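+
+/*
+ * A minimal notifier sketch (my_session_event() is a hypothetical callback;
+ * the data argument passed by the framework is the affected
+ * vs_session_device):
+ *
+ *	static int my_session_event(struct notifier_block *nb,
+ *			unsigned long event, void *data)
+ *	{
+ *		struct vs_session_device *session = data;
+ *
+ *		if (event == VS_SESSION_NOTIFY_ADD)
+ *			pr_info("session %s added\n", session->name);
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	static struct notifier_block my_nb = {
+ *		.notifier_call = my_session_event,
+ *	};
+ *
+ *	vs_session_register_notify(&my_nb);
+ */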
+
+extern int vs_session_unbind_driver(struct vs_service_device *service);
+
+extern void vs_session_for_each_service(struct vs_session_device *session,
+		void (*func)(struct vs_service_device *, void *), void *data);
+
+extern struct mutex vs_session_lock;
+extern int vs_session_for_each_locked(
+		int (*fn)(struct vs_session_device *session, void *data),
+		void *data);
+
+static inline int vs_session_for_each(
+		int (*fn)(struct vs_session_device *session, void *data),
+		void *data)
+{
+	int r;
+	mutex_lock(&vs_session_lock);
+	r = vs_session_for_each_locked(fn, data);
+	mutex_unlock(&vs_session_lock);
+	return r;
+}
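+
+/*
+ * A minimal iteration sketch: count the registered sessions. count_session()
+ * is a hypothetical callback, not part of this header.
+ *
+ *	static int count_session(struct vs_session_device *session, void *data)
+ *	{
+ *		(*(int *)data)++;
+ *		return 0;
+ *	}
+ *
+ *	int count = 0;
+ *	vs_session_for_each(count_session, &count);
+ */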
+
+#endif /* _VSERVICES_SESSION_H_ */
diff --git a/include/vservices/transport.h b/include/vservices/transport.h
new file mode 100644
index 0000000..6251ce1
--- /dev/null
+++ b/include/vservices/transport.h
@@ -0,0 +1,150 @@
+/*
+ * include/vservices/transport.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file contains the transport vtable structure. This is made public so
+ * that application drivers can call the vtable operations directly (via the
+ * inlined wrappers in service.h) rather than indirectly through an extra
+ * framework function call.
+ *
+ */
+
+#ifndef _VSERVICES_TRANSPORT_H_
+#define _VSERVICES_TRANSPORT_H_
+
+#include <linux/types.h>
+
+#include <vservices/types.h>
+
+struct vs_transport;
+struct vs_mbuf;
+struct vs_service_device;
+
+/**
+ * struct vs_transport_vtable - Transport driver operations. Transport drivers
+ * must provide implementations for all operations in this table.
+ * --- Message buffer allocation ---
+ * @alloc_mbuf: Allocate an mbuf of the given size for the given service
+ * @free_mbuf: Deallocate an mbuf
+ * @mbuf_size: Return the size in bytes of a message buffer. The size returned
+ *             should be the total number of bytes including any headers.
+ * @max_mbuf_size: Return the maximum allowable message buffer allocation size.
+ * --- Message sending ---
+ * @send: Queue an mbuf for sending
+ * @flush: Start the transfer for the current message batch, if any
+ * @notify: Send a notification
+ * --- Transport-level reset handling ---
+ * @reset: Reset the transport layer
+ * @ready: Ready the transport layer
+ * --- Service management ---
+ * @service_add: A new service has been added to this transport's session
+ * @service_remove: A service has been removed from this transport's session
+ * @service_start: A service on this transport's session has had its resource
+ *     allocations set and is about to start. This is always interleaved with
+ *     service_reset, with one specific exception: the core service client,
+ *     which has its quotas initially hard-coded to 0 send / 1 recv and
+ *     adjusted when the initial startup message arrives.
+ * @service_reset: A service on this transport's session has just been reset,
+ *     and any resources allocated to it should be cleaned up to prepare
+ *     for later reallocation.
+ * @service_send_avail: The number of message buffers that this service is
+ *                      able to send before going over quota.
+ * --- Query transport capabilities ---
+ * @get_notify_bits: Fetch the number of sent and received notification bits
+ *     supported by this transport. Note that this can be any positive value
+ *     up to UINT_MAX.
+ * @get_quota_limits: Fetch the total send and receive message buffer quotas
+ *     supported by this transport. Note that this can be any positive value
+ *     up to UINT_MAX.
+ */
+struct vs_transport_vtable {
+	/* Message buffer allocation */
+	struct vs_mbuf *(*alloc_mbuf)(struct vs_transport *transport,
+			struct vs_service_device *service, size_t size,
+			gfp_t gfp_flags);
+	void (*free_mbuf)(struct vs_transport *transport,
+			struct vs_service_device *service,
+			struct vs_mbuf *mbuf);
+	size_t (*mbuf_size)(struct vs_mbuf *mbuf);
+	size_t (*max_mbuf_size)(struct vs_transport *transport);
+
+	/* Sending messages */
+	int (*send)(struct vs_transport *transport,
+			struct vs_service_device *service,
+			struct vs_mbuf *mbuf, unsigned long flags);
+	int (*flush)(struct vs_transport *transport,
+			struct vs_service_device *service);
+	int (*notify)(struct vs_transport *transport,
+			struct vs_service_device *service,
+			unsigned long bits);
+
+	/* Raising and clearing transport-level reset */
+	void (*reset)(struct vs_transport *transport);
+	void (*ready)(struct vs_transport *transport);
+
+	/* Service management */
+	int (*service_add)(struct vs_transport *transport,
+			struct vs_service_device *service);
+	void (*service_remove)(struct vs_transport *transport,
+			struct vs_service_device *service);
+
+	int (*service_start)(struct vs_transport *transport,
+			struct vs_service_device *service);
+	int (*service_reset)(struct vs_transport *transport,
+			struct vs_service_device *service);
+
+	ssize_t (*service_send_avail)(struct vs_transport *transport,
+			struct vs_service_device *service);
+
+	/* Query transport capabilities */
+	void (*get_notify_bits)(struct vs_transport *transport,
+			unsigned *send_notify_bits, unsigned *recv_notify_bits);
+	void (*get_quota_limits)(struct vs_transport *transport,
+			unsigned *send_quota, unsigned *recv_quota);
+};
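+
+/*
+ * A minimal sketch of a transport driver filling in the vtable (the my_*
+ * functions are hypothetical implementations of the operations above):
+ *
+ *	static const struct vs_transport_vtable my_transport_vt = {
+ *		.alloc_mbuf		= my_alloc_mbuf,
+ *		.free_mbuf		= my_free_mbuf,
+ *		.mbuf_size		= my_mbuf_size,
+ *		.max_mbuf_size		= my_max_mbuf_size,
+ *		.send			= my_send,
+ *		.flush			= my_flush,
+ *		.notify			= my_notify,
+ *		.reset			= my_reset,
+ *		.ready			= my_ready,
+ *		.service_add		= my_service_add,
+ *		.service_remove		= my_service_remove,
+ *		.service_start		= my_service_start,
+ *		.service_reset		= my_service_reset,
+ *		.service_send_avail	= my_service_send_avail,
+ *		.get_notify_bits	= my_get_notify_bits,
+ *		.get_quota_limits	= my_get_quota_limits,
+ *	};
+ */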
+
+/* Flags for .send */
+#define VS_TRANSPORT_SEND_FLAGS_MORE		0x1
+
+/**
+ * struct vs_transport - A structure representing a transport
+ * @type: Type of transport, e.g. microvisor or loopback
+ * @vt: Transport operations table
+ * @notify_info: Array of incoming notification settings
+ * @notify_info_size: Size of the incoming notification array
+ */
+struct vs_transport {
+	const char *type;
+	const struct vs_transport_vtable *vt;
+	struct vs_notify_info *notify_info;
+	int notify_info_size;
+};
+
+/**
+ * struct vs_mbuf - Message buffer. This is always allocated and released by the
+ * transport callbacks defined above, so it may be embedded in a
+ * transport-specific structure containing additional state.
+ * @data: Message data buffer
+ * @size: Size of the data buffer in bytes
+ * @is_recv: True if this mbuf was received from the other end of the
+ *           transport. False if it was allocated by this end for sending.
+ * @priv: Private value that will not be touched by the framework
+ * @queue: list_head for entry in lists. The session layer uses this queue
+ * for receiving messages. The transport driver may use this queue for its
+ * own purposes when sending messages.
+ */
+struct vs_mbuf {
+	void *data;
+	size_t size;
+	bool is_recv;
+	void *priv;
+	struct list_head queue;
+};
+
+#endif /* _VSERVICES_TRANSPORT_H_ */
diff --git a/include/vservices/types.h b/include/vservices/types.h
new file mode 100644
index 0000000..306156e
--- /dev/null
+++ b/include/vservices/types.h
@@ -0,0 +1,41 @@
+/*
+ * include/vservices/types.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _VSERVICE_TYPES_H
+#define _VSERVICE_TYPES_H
+
+#include <linux/types.h>
+
+typedef u16 vs_service_id_t;
+typedef u16 vs_message_id_t;
+
+/*
+ * An opaque handle to a queued asynchronous command. This is used internally
+ * by the generated interface code, to identify which of the pending commands
+ * is being replied to. It is provided as a parameter to non-blocking handler
+ * callbacks for queued asynchronous requests, and must be stored by the server
+ * and passed to the corresponding reply call.
+ */
+typedef struct vservice_queued_request vservice_queued_request_t;
+
+/*
+ * The following enum is used by a server to report the result of an open
+ * callback, returning VS_SERVER_RESP_SUCCESS or VS_SERVER_RESP_FAILURE
+ * respectively. The server may instead choose to complete the request
+ * explicitly; in that case it should return VS_SERVER_RESP_EXPLICIT_COMPLETE.
+ */
+typedef enum vs_server_response_type {
+	VS_SERVER_RESP_SUCCESS,
+	VS_SERVER_RESP_FAILURE,
+	VS_SERVER_RESP_EXPLICIT_COMPLETE
+} vs_server_response_type_t;
+
+#endif /* _VSERVICE_TYPES_H */
diff --git a/include/vservices/wait.h b/include/vservices/wait.h
new file mode 100644
index 0000000..544937d
--- /dev/null
+++ b/include/vservices/wait.h
@@ -0,0 +1,455 @@
+/*
+ * include/vservices/wait.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Generic wait event helpers for Virtual Service drivers.
+ */
+
+#ifndef _VSERVICE_SERVICE_WAIT_H
+#define _VSERVICE_SERVICE_WAIT_H
+
+#include <linux/sched.h>
+#include <linux/wait.h>
+
+#include <vservices/service.h>
+
+/* Older kernels don't have lockdep_assert_held_once(). */
+#ifndef lockdep_assert_held_once
+#ifdef CONFIG_LOCKDEP
+#define lockdep_assert_held_once(l) do {				\
+		WARN_ON_ONCE(debug_locks && !lockdep_is_held(l));	\
+	} while (0)
+#else
+#define lockdep_assert_held_once(l) do { } while (0)
+#endif
+#endif
+
+/* Legacy wait macro; needs rewriting to use vs_state_lock_safe(). */
+/* FIXME: Redmine ticket #229 - philip. */
+/**
+ * __vs_service_wait_event - Wait for a condition to become true for a
+ * Virtual Service.
+ *
+ * @_service: The service to wait for the condition to be true for.
+ * @_wq: Waitqueue to wait on.
+ * @_condition: Condition to wait for.
+ *
+ * Returns: This function returns 0 if the condition is true, or -ERESTARTSYS
+ *          if the wait was interrupted by a signal. If _state is
+ *          TASK_UNINTERRUPTIBLE then this function will always return 0.
+ *
+ * This function must be called with the service's state lock held. The wait
+ * is performed without the state lock held, but the condition is re-checked
+ * after reacquiring the state lock. This property allows this function to
+ * check the state of the service's protocol in a thread safe manner.
+ *
+ * The caller is responsible for ensuring that it has not been detached from
+ * the given service.
+ *
+ * It is nearly always wrong to call this on the service workqueue, since
+ * the workqueue is single-threaded and the state can only change when a
+ * handler function is called on it.
+ */
+#define __vs_service_wait_event(_service, _wq, _cond, _state)		\
+	({								\
+		DEFINE_WAIT(__wait);					\
+		int __ret = 0;						\
+									\
+		lockdep_assert_held_once(&(_service)->state_mutex);	\
+		do {							\
+			prepare_to_wait(&(_wq), &__wait, (_state));	\
+									\
+			if (_cond)					\
+				break;					\
+									\
+			if ((_state) == TASK_INTERRUPTIBLE &&		\
+					signal_pending(current)) {	\
+				__ret = -ERESTARTSYS;			\
+				break;					\
+			}						\
+									\
+			vs_service_state_unlock(_service);		\
+			schedule();					\
+			vs_service_state_lock(_service);		\
+		} while (!(_cond));					\
+									\
+		finish_wait(&(_wq), &__wait);				\
+		__ret;							\
+	})
+
+/* Legacy wait macros; need rewriting to use __vs_wait_state(). */
+/* FIXME: Redmine ticket #229 - philip. */
+#define vs_service_wait_event(_service, _wq, _cond) \
+	__vs_service_wait_event(_service, _wq, _cond, TASK_INTERRUPTIBLE)
+#define vs_service_wait_event_nointr(_service, _wq, _cond) \
+	__vs_service_wait_event(_service, _wq, _cond, TASK_UNINTERRUPTIBLE)
+
+/**
+ * __vs_wait_state - block until a condition becomes true on a service state.
+ *
+ * @_state: The protocol state to wait on.
+ * @_cond: Condition to wait for.
+ * @_intr: If true, perform an interruptible wait; the wait may then fail
+ *         with -ERESTARTSYS.
+ * @_timeout: A timeout in jiffies, or negative for no timeout. If the
+ *         timeout expires, the wait will fail with -ETIMEDOUT.
+ * @_bh: The token _bh if this service uses tx_atomic (sends from a
+ *         non-framework tasklet); otherwise nothing.
+ *
+ * Return: 0 once the condition becomes true, or a negative error code
+ *         (-ECANCELED, -ERESTARTSYS, -ETIMEDOUT or -ENOLINK) on failure.
+ *
+ * This macro blocks waiting until a particular condition becomes true on a
+ * service state. The service must be running; if not, or if it ceases to be
+ * running during the wait, -ECANCELED will be returned.
+ *
+ * This is not an exclusive wait. If an exclusive wait is desired it is
+ * usually better to use the waiting alloc or send functions.
+ *
+ * This macro must be called with a reference to the service held, and with
+ * the service's state lock held. The state lock will be dropped by waiting
+ * but reacquired before returning, unless -ENOLINK is returned, in which case
+ * the service driver has been unbound and the lock cannot be reacquired.
+ */
+#define __vs_wait_state(_state, _cond, _intr, _timeout, _bh)	\
+	({								\
+		DEFINE_WAIT(__wait);					\
+		int __ret;						\
+		int __jiffies __maybe_unused = (_timeout);		\
+		struct vs_service_device *__service = (_state)->service;\
+									\
+		while (1) {						\
+			prepare_to_wait(&__service->quota_wq, &__wait,	\
+					_intr ? TASK_INTERRUPTIBLE :    \
+					TASK_UNINTERRUPTIBLE);		\
+									\
+			if (!VSERVICE_BASE_STATE_IS_RUNNING(		\
+					(_state)->state.base)) {	\
+				__ret = -ECANCELED;			\
+				break;					\
+			}						\
+									\
+			if (_cond) {					\
+				__ret = 0;				\
+				break;					\
+			}						\
+									\
+			if (_intr && signal_pending(current)) {		\
+				__ret = -ERESTARTSYS;			\
+				break;					\
+			}						\
+									\
+			vs_state_unlock##_bh(_state);			\
+									\
+			if (_timeout >= 0) {				\
+				__jiffies = schedule_timeout(__jiffies);\
+				if (!__jiffies) {			\
+					__ret = -ETIMEDOUT;		\
+					break;				\
+				}					\
+			} else {					\
+				schedule();				\
+			}						\
+									\
+			if (!vs_state_lock_safe##_bh(_state)) {		\
+				__ret = -ENOLINK;			\
+				break;					\
+			}						\
+		}							\
+									\
+		finish_wait(&__service->quota_wq, &__wait);		\
+		__ret;							\
+	})
+
+/* Specialisations of __vs_wait_state for common uses. */
+#define vs_wait_state(_state, _cond) \
+	__vs_wait_state(_state, _cond, true, -1,)
+#define vs_wait_state_timeout(_state, _cond, _timeout) \
+	__vs_wait_state(_state, _cond, true, _timeout,)
+#define vs_wait_state_nointr(_state, _cond) \
+	__vs_wait_state(_state, _cond, false, -1,)
+#define vs_wait_state_nointr_timeout(_state, _cond, _timeout) \
+	__vs_wait_state(_state, _cond, false, _timeout,)
+#define vs_wait_state_bh(_state, _cond) \
+	__vs_wait_state(_state, _cond, true, -1, _bh)
+#define vs_wait_state_timeout_bh(_state, _cond, _timeout) \
+	__vs_wait_state(_state, _cond, true, _timeout, _bh)
+#define vs_wait_state_nointr_bh(_state, _cond) \
+	__vs_wait_state(_state, _cond, false, -1, _bh)
+#define vs_wait_state_nointr_timeout_bh(_state, _cond, _timeout) \
+	__vs_wait_state(_state, _cond, false, _timeout, _bh)
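+
+/*
+ * A minimal sketch of waiting for a protocol flag (state->io_done is a
+ * hypothetical driver field set from a message handler):
+ *
+ *	vs_state_lock(state);
+ *	err = vs_wait_state_timeout(state, state->io_done, 5 * HZ);
+ *	if (err != -ENOLINK)
+ *		vs_state_unlock(state);
+ */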
+
+/**
+ * __vs_wait_alloc - block until quota is available, then allocate a buffer.
+ *
+ * @_state: The protocol state to allocate a message for.
+ * @_alloc_func: The message buffer allocation function to run. This is the
+ *         full function invocation, not a pointer to the function.
+ * @_cond: Additional condition which must remain true, or else the wait
+ *         will fail with -ECANCELED. This is typically used to check the
+ *         service's protocol state. Note that this condition will only
+ *         be checked after sleeping; it is assumed to be true when the
+ *         macro is first called.
+ * @_unlock: If true, drop the service state lock before sleeping. The wait
+ *         may then fail with -ENOLINK if the driver is detached from the
+ *         service, in which case the lock is dropped.
+ * @_intr: If true, perform an interruptible wait; the wait may then fail
+ *         with -ERESTARTSYS.
+ * @_timeout: A timeout in jiffies, or negative for no timeout. If the
+ *         timeout expires, the wait will fail with -ETIMEDOUT.
+ * @_bh: The token _bh if this service uses tx_atomic (sends from a
+ *         non-framework tasklet); otherwise nothing.
+ *
+ * Return: Return a pointer to a message buffer on successful allocation,
+ *         or an error code in ERR_PTR form.
+ *
+ * This macro calls a specified message allocation function, and blocks
+ * if it returns -ENOBUFS, waiting until quota is available on the service
+ * before retrying. It aborts the wait if the service resets, or if the
+ * optionally specified condition becomes false. Note that a reset followed
+ * quickly by an activate might not trigger a failure; if that is significant
+ * for your driver, use the optional condition to detect it.
+ *
+ * This macro must be called with a reference to the service held, and with
+ * the service's state lock held. The reference and state lock will still be
+ * held on return, unless -ENOLINK is returned, in which case the lock has been
+ * dropped and cannot be reacquired.
+ *
+ * This is always an exclusive wait. It is safe to call without separately
+ * waking the waitqueue afterwards; if the allocator function fails for any
+ * reason other than quota exhaustion then another waiter will be woken.
+ *
+ * Be wary of potential deadlocks when using this macro on the service
+ * workqueue. If both ends block their service workqueues waiting for quota,
+ * then no progress can be made. It is usually only correct to block the
+ * service workqueue on the server side.
+ */
+#define __vs_wait_alloc(_state, _alloc_func, _cond, _unlock, _intr, 	\
+		_timeout, _bh)						\
+	({								\
+		DEFINE_WAIT(__wait);					\
+		struct vs_mbuf *__mbuf = NULL;				\
+		int __jiffies __maybe_unused = (_timeout);		\
+		struct vs_service_device *__service = (_state)->service;\
+									\
+		while (!vs_service_send_mbufs_available(__service)) {	\
+			if (_intr && signal_pending(current)) {		\
+				__mbuf = ERR_PTR(-ERESTARTSYS);		\
+				break;					\
+			}						\
+									\
+			prepare_to_wait_exclusive(			\
+					&__service->quota_wq, &__wait,	\
+					_intr ? TASK_INTERRUPTIBLE :    \
+					TASK_UNINTERRUPTIBLE);		\
+									\
+			if (_unlock)					\
+				vs_state_unlock##_bh(_state);		\
+									\
+			if (_timeout >= 0) {				\
+				__jiffies = schedule_timeout(__jiffies);\
+				if (!__jiffies) {			\
+					__mbuf = ERR_PTR(-ETIMEDOUT);	\
+					break;				\
+				}					\
+			} else {					\
+				schedule();				\
+			}						\
+									\
+			if (_unlock && !vs_state_lock_safe##_bh(	\
+						_state)) {		\
+				__mbuf = ERR_PTR(-ENOLINK);		\
+				break;					\
+			}						\
+									\
+			if (!VSERVICE_BASE_STATE_IS_RUNNING(		\
+					(_state)->state.base) ||	\
+					!(_cond)) {			\
+				__mbuf = ERR_PTR(-ECANCELED);		\
+				break;					\
+			}						\
+		}							\
+		finish_wait(&__service->quota_wq, &__wait);		\
+									\
+		if (__mbuf == NULL)					\
+			__mbuf = (_alloc_func);				\
+		if (IS_ERR(__mbuf) && (PTR_ERR(__mbuf) != -ENOBUFS))	\
+			wake_up(&__service->quota_wq);			\
+		__mbuf;							\
+	})
+
+/* Specialisations of __vs_wait_alloc for common uses. */
+#define vs_wait_alloc(_state, _cond, _alloc_func) \
+	__vs_wait_alloc(_state, _alloc_func, _cond, true, true, -1,)
+#define vs_wait_alloc_timeout(_state, _cond, _alloc_func, _timeout) \
+	__vs_wait_alloc(_state, _alloc_func, _cond, true, true, _timeout,)
+#define vs_wait_alloc_nointr(_state, _cond, _alloc_func) \
+	__vs_wait_alloc(_state, _alloc_func, _cond, true, false, -1,)
+#define vs_wait_alloc_nointr_timeout(_state, _cond, _alloc_func, _timeout) \
+	__vs_wait_alloc(_state, _alloc_func, _cond, true, false, _timeout,)
+#define vs_wait_alloc_bh(_state, _cond, _alloc_func) \
+	__vs_wait_alloc(_state, _alloc_func, _cond, true, true, -1, _bh)
+#define vs_wait_alloc_timeout_bh(_state, _cond, _alloc_func, _timeout) \
+	__vs_wait_alloc(_state, _alloc_func, _cond, true, true, _timeout, _bh)
+#define vs_wait_alloc_nointr_bh(_state, _cond, _alloc_func) \
+	__vs_wait_alloc(_state, _alloc_func, _cond, true, false, -1, _bh)
+#define vs_wait_alloc_nointr_timeout_bh(_state, _cond, _alloc_func, _timeout) \
+	__vs_wait_alloc(_state, _alloc_func, _cond, true, false, _timeout, _bh)
+#define vs_wait_alloc_locked(_state, _alloc_func) \
+	__vs_wait_alloc(_state, _alloc_func, true, false, true, -1,)
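+
+/*
+ * A minimal allocation sketch (vs_my_proto_alloc_msg() and state->ready are
+ * hypothetical; a real allocation function would be protocol-generated):
+ *
+ *	vs_state_lock(state);
+ *	mbuf = vs_wait_alloc(state, state->ready,
+ *			vs_my_proto_alloc_msg(state));
+ *	if (IS_ERR(mbuf)) {
+ *		if (PTR_ERR(mbuf) != -ENOLINK)
+ *			vs_state_unlock(state);
+ *		return PTR_ERR(mbuf);
+ *	}
+ */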
+
+/* Legacy wait macros, to be removed and replaced with those above. */
+/* FIXME: Redmine ticket #229 - philip. */
+#define vs_service_waiting_alloc(_state, _alloc_func) \
+	__vs_wait_alloc(_state, _alloc_func, true, false, true, -1,)
+#define vs_service_waiting_alloc_cond_locked(_state, _alloc_func, _cond) \
+	__vs_wait_alloc(_state, _alloc_func, _cond, true, true, -1,)
+#define vs_service_waiting_alloc_cond_locked_nointr(_state, _alloc_func, _cond) \
+	__vs_wait_alloc(_state, _alloc_func, _cond, true, false, -1,)
+
+/**
+ * __vs_wait_send - block until quota is available, then send a message.
+ *
+ * @_state: The protocol state to send a message for.
+ * @_cond: Additional condition which must remain true, or else the wait
+ *         will fail with -ECANCELED. This is typically used to check the
+ *         service's protocol state. Note that this condition will only
+ *         be checked after sleeping; it is assumed to be true when the
+ *         macro is first called.
+ * @_send_func: The message send function to run. This is the full function
+ *         invocation, not a pointer to the function.
+ * @_unlock: If true, drop the service state lock before sleeping. The wait
+ *         may then fail with -ENOLINK if the driver is detached from the
+ *         service, in which case the lock is dropped.
+ * @_check_running: If true, the wait will return -ECANCELED if the service's
+ *         base state is not active, or ceases to be active.
+ * @_intr: If true, perform an interruptible wait; the wait may then fail
+ *         with -ERESTARTSYS.
+ * @_timeout: A timeout in jiffies, or negative for no timeout. If the
+ *         timeout expires, the wait will fail with -ETIMEDOUT.
+ * @_bh: The token _bh if this service uses tx_atomic (sends from a
+ *         non-framework tasklet); otherwise nothing.
+ *
+ * Return: If the send succeeds, then 0 is returned; otherwise an error
+ *         code may be returned as described above.
+ *
+ * This macro calls a specified message send function, and blocks if it
+ * returns -ENOBUFS, waiting until quota is available on the service before
+ * retrying. It aborts the wait if it finds the service in reset, or if the
+ * optionally specified condition becomes false. Note that a reset followed
+ * quickly by an activate might not trigger a failure; if that is significant
+ * for your driver, use the optional condition to detect it.
+ *
+ * This macro must be called with a reference to the service held, and with
+ * the service's state lock held. The reference and state lock will still be
+ * held on return, unless -ENOLINK is returned, in which case the lock has been
+ * dropped and cannot be reacquired.
+ *
+ * This is always an exclusive wait. It is safe to call without separately
+ * waking the waitqueue afterwards; if the send function fails for any
+ * reason other than quota exhaustion then another waiter will be woken.
+ *
+ * Be wary of potential deadlocks when calling this function on the service
+ * workqueue. If both ends block their service workqueues waiting for quota,
+ * then no progress can be made. It is usually only correct to block the
+ * service workqueue on the server side.
+ */
+#define __vs_wait_send(_state, _cond, _send_func, _unlock, 		\
+		_check_running, _intr, _timeout, _bh)			\
+	({								\
+		DEFINE_WAIT(__wait);					\
+		int __ret = 0;						\
+		int __jiffies __maybe_unused = (_timeout);		\
+		struct vs_service_device *__service = (_state)->service;\
+									\
+		while (!vs_service_send_mbufs_available(__service)) {	\
+			if (_intr && signal_pending(current)) {		\
+				__ret = -ERESTARTSYS;			\
+				break;					\
+			}						\
+									\
+			prepare_to_wait_exclusive(			\
+					&__service->quota_wq, &__wait,	\
+					_intr ? TASK_INTERRUPTIBLE :    \
+					TASK_UNINTERRUPTIBLE);		\
+									\
+			if (_unlock)					\
+				vs_state_unlock##_bh(_state);		\
+									\
+			if (_timeout >= 0) {				\
+				__jiffies = schedule_timeout(__jiffies);\
+				if (!__jiffies) {			\
+					__ret = -ETIMEDOUT;		\
+					break;				\
+				}					\
+			} else {					\
+				schedule();				\
+			}						\
+									\
+			if (_unlock && !vs_state_lock_safe##_bh(	\
+						_state)) {		\
+				__ret = -ENOLINK;			\
+				break;					\
+			}						\
+									\
+			if ((_check_running &&				\
+					!VSERVICE_BASE_STATE_IS_RUNNING(\
+					(_state)->state.base)) ||	\
+					!(_cond)) {			\
+				__ret = -ECANCELED;			\
+				break;					\
+			}						\
+		}							\
+		finish_wait(&__service->quota_wq, &__wait);		\
+									\
+		if (!__ret)						\
+			__ret = (_send_func);				\
+		if ((__ret < 0) && (__ret != -ENOBUFS))			\
+			wake_up(&__service->quota_wq);			\
+		__ret;							\
+	})
+
+/* Specialisations of __vs_wait_send for common uses. */
+#define vs_wait_send(_state, _cond, _send_func) \
+	__vs_wait_send(_state, _cond, _send_func, true, true, true, -1,)
+#define vs_wait_send_timeout(_state, _cond, _send_func, _timeout) \
+	__vs_wait_send(_state, _cond, _send_func, true, true, true, _timeout,)
+#define vs_wait_send_nointr(_state, _cond, _send_func) \
+	__vs_wait_send(_state, _cond, _send_func, true, true, false, -1,)
+#define vs_wait_send_nointr_timeout(_state, _cond, _send_func, _timeout) \
+	__vs_wait_send(_state, _cond, _send_func, true, true, false, _timeout,)
+#define vs_wait_send_bh(_state, _cond, _send_func) \
+	__vs_wait_send(_state, _cond, _send_func, true, true, true, -1, _bh)
+#define vs_wait_send_timeout_bh(_state, _cond, _send_func, _timeout) \
+	__vs_wait_send(_state, _cond, _send_func, true, true, true, \
+			_timeout, _bh)
+#define vs_wait_send_nointr_bh(_state, _cond, _send_func) \
+	__vs_wait_send(_state, _cond, _send_func, true, true, false, -1, _bh)
+#define vs_wait_send_nointr_timeout_bh(_state, _cond, _send_func, _timeout) \
+	__vs_wait_send(_state, _cond, _send_func, true, true, false, \
+			_timeout, _bh)
+#define vs_wait_send_locked(_state, _send_func) \
+	__vs_wait_send(_state, true, _send_func, false, true, true, -1,)
+#define vs_wait_send_locked_nocheck(_state, _send_func) \
+	__vs_wait_send(_state, true, _send_func, false, false, true, -1,)
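+
+/*
+ * A minimal send sketch (vs_my_proto_send_ack() and state->running are
+ * hypothetical; a real send function would be protocol-generated):
+ *
+ *	vs_state_lock(state);
+ *	err = vs_wait_send(state, state->running,
+ *			vs_my_proto_send_ack(state));
+ *	if (err != -ENOLINK)
+ *		vs_state_unlock(state);
+ */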
+
+/* Legacy wait macros, to be removed and replaced with those above. */
+/* FIXME: Redmine ticket #229 - philip. */
+#define vs_service_waiting_send(_state, _send_func) \
+	__vs_wait_send(_state, true, _send_func, true, true, true, -1,)
+#define vs_service_waiting_send_nointr(_state, _send_func) \
+	__vs_wait_send(_state, true, _send_func, true, true, false, -1,)
+#define vs_service_waiting_send_cond(_state, _cond, _send_func) \
+	__vs_wait_send(_state, _cond, _send_func, true, true, true, -1,)
+#define vs_service_waiting_send_cond_nointr(_state, _cond, _send_func) \
+	__vs_wait_send(_state, _cond, _send_func, true, true, false, -1,)
+#define vs_service_waiting_send_nocheck(_state, _send_func) \
+	__vs_wait_send(_state, true, _send_func, true, false, true, -1,)
+
+#endif /* _VSERVICE_SERVICE_WAIT_H */
diff --git a/kernel/audit.c b/kernel/audit.c
index 3461a3d..194fa1a 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -125,6 +125,7 @@
 static atomic_t    audit_lost = ATOMIC_INIT(0);
 
 /* The netlink socket. */
+static DEFINE_MUTEX(audit_sock_mutex);
 static struct sock *audit_sock;
 static int audit_net_id;
 
@@ -411,7 +412,9 @@
 restart:
 	/* take a reference in case we can't send it and we want to hold it */
 	skb_get(skb);
+	mutex_lock(&audit_sock_mutex);
 	err = netlink_unicast(audit_sock, skb, audit_nlk_portid, 0);
+	mutex_unlock(&audit_sock_mutex);
 	if (err < 0) {
 		pr_err("netlink_unicast sending to audit_pid=%d returned error: %d\n",
 		       audit_pid, err);
@@ -423,7 +426,9 @@
 				snprintf(s, sizeof(s), "audit_pid=%d reset", audit_pid);
 				audit_log_lost(s);
 				audit_pid = 0;
+				mutex_lock(&audit_sock_mutex);
 				audit_sock = NULL;
+				mutex_unlock(&audit_sock_mutex);
 			} else {
 				pr_warn("re-scheduling(#%d) write to audit_pid=%d\n",
 					attempts, audit_pid);
@@ -811,12 +816,16 @@
 
 static int audit_replace(pid_t pid)
 {
+	int	len;
 	struct sk_buff *skb = audit_make_reply(0, 0, AUDIT_REPLACE, 0, 0,
 					       &pid, sizeof(pid));
 
 	if (!skb)
 		return -ENOMEM;
-	return netlink_unicast(audit_sock, skb, audit_nlk_portid, 0);
+	mutex_lock(&audit_sock_mutex);
+	len = netlink_unicast(audit_sock, skb, audit_nlk_portid, 0);
+	mutex_unlock(&audit_sock_mutex);
+	return len;
 }
 
 static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
@@ -901,7 +910,9 @@
 				audit_log_config_change("audit_pid", new_pid, audit_pid, 1);
 			audit_pid = new_pid;
 			audit_nlk_portid = NETLINK_CB(skb).portid;
+			mutex_lock(&audit_sock_mutex);
 			audit_sock = skb->sk;
+			mutex_unlock(&audit_sock_mutex);
 		}
 		if (s.mask & AUDIT_STATUS_RATE_LIMIT) {
 			err = audit_set_rate_limit(s.rate_limit);
@@ -1169,10 +1180,12 @@
 {
 	struct audit_net *aunet = net_generic(net, audit_net_id);
 	struct sock *sock = aunet->nlsk;
+	mutex_lock(&audit_sock_mutex);
 	if (sock == audit_sock) {
 		audit_pid = 0;
 		audit_sock = NULL;
 	}
+	mutex_unlock(&audit_sock_mutex);
 
 	RCU_INIT_POINTER(aunet->nlsk, NULL);
 	synchronize_net();
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 475ec4f..340eccd 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -11216,13 +11216,25 @@
 
 static void perf_event_exit_cpu_context(int cpu)
 {
+	struct perf_cpu_context *cpuctx;
 	struct perf_event_context *ctx;
+	unsigned long flags;
 	struct pmu *pmu;
 	int idx;
 
 	idx = srcu_read_lock(&pmus_srcu);
 	list_for_each_entry_rcu(pmu, &pmus, entry) {
-		ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
+		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
+		ctx = &cpuctx->ctx;
+
+		/* Cancel the mux hrtimer to avoid CPU migration */
+		if (pmu->task_ctx_nr != perf_sw_context) {
+			raw_spin_lock_irqsave(&cpuctx->hrtimer_lock, flags);
+			hrtimer_cancel(&cpuctx->hrtimer);
+			cpuctx->hrtimer_active = 0;
+			raw_spin_unlock_irqrestore(&cpuctx->hrtimer_lock,
+							flags);
+		}
 
 		mutex_lock(&ctx->mutex);
 		smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
diff --git a/kernel/module.c b/kernel/module.c
index 8a84031..1277bdf 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -4035,7 +4035,7 @@
 
 	for (i = 0; i < kallsyms->num_symtab; i++)
 		if (strcmp(name, symname(kallsyms, i)) == 0 &&
-		    kallsyms->symtab[i].st_info != 'U')
+		    kallsyms->symtab[i].st_shndx != SHN_UNDEF)
 			return kallsyms->symtab[i].st_value;
 	return 0;
 }
@@ -4081,6 +4081,10 @@
 		if (mod->state == MODULE_STATE_UNFORMED)
 			continue;
 		for (i = 0; i < kallsyms->num_symtab; i++) {
+
+			if (kallsyms->symtab[i].st_shndx == SHN_UNDEF)
+				continue;
+
 			ret = fn(data, symname(kallsyms, i),
 				 mod, kallsyms->symtab[i].st_value);
 			if (ret != 0)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 8f29103..424bb66 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8031,6 +8031,7 @@
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 
 	update_max_interval();
+	walt_update_min_max_capacity();
 
 	return 0;
 }
@@ -8064,6 +8065,7 @@
 		return ret;
 	}
 	sched_domains_numa_masks_clear(cpu);
+	walt_update_min_max_capacity();
 	return 0;
 }
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0bb5046..7944ae9 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7350,7 +7350,7 @@
 	return cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) &&
 	       cpu_active(cpu) && !cpu_isolated(cpu) &&
 	       capacity_orig_of(cpu) >= capacity_orig_of(rtg_target_cpu) &&
-	       task_fits_max(p, cpu);
+	       task_fits_max(p, cpu) && !__cpu_overutilized(cpu, task_util(p));
 }
 
 #define SCHED_SELECT_PREV_CPU_NSEC	2000000
@@ -11166,7 +11166,8 @@
 	 * - A task which has been woken up by try_to_wake_up() and
 	 *   waiting for actually being woken up by sched_ttwu_pending().
 	 */
-	if (!se->sum_exec_runtime || p->state == TASK_WAKING)
+	if (!se->sum_exec_runtime ||
+	    (p->state == TASK_WAKING && p->sched_remote_wakeup))
 		return true;
 
 	return false;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index e24df36..bb9cf4a 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2422,7 +2422,11 @@
 	int i;
 	int max_cap = 0, min_cap = INT_MAX;
 
-	for_each_online_cpu(i) {
+	for_each_possible_cpu(i) {
+
+		if (!cpu_active(i))
+			continue;
+
 		max_cap = max(max_cap, cpu_capacity(i));
 		min_cap = min(min_cap, cpu_capacity(i));
 	}
@@ -2687,6 +2691,7 @@
 }
 
 extern void walt_sched_energy_populated_callback(void);
+extern void walt_update_min_max_capacity(void);
 
 #else	/* CONFIG_SCHED_WALT */
 
@@ -2820,6 +2825,7 @@
 }
 
 static inline void walt_sched_energy_populated_callback(void) { }
+static inline void walt_update_min_max_capacity(void) { }
 
 #endif	/* CONFIG_SCHED_WALT */
 
diff --git a/kernel/sched/tune.c b/kernel/sched/tune.c
index a8fab0c..92fcb92 100644
--- a/kernel/sched/tune.c
+++ b/kernel/sched/tune.c
@@ -791,7 +791,7 @@
 	    u64 prefer_idle)
 {
 	struct schedtune *st = css_st(css);
-	st->prefer_idle = prefer_idle;
+	st->prefer_idle = !!prefer_idle;
 
 	return 0;
 }
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index 6144dee..5184252 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -2185,7 +2185,7 @@
 	return capacity;
 }
 
-static void update_min_max_capacity(void)
+void walt_update_min_max_capacity(void)
 {
 	unsigned long flags;
 
@@ -2411,7 +2411,7 @@
 		return 0;
 
 	if (val == CPUFREQ_REMOVE_POLICY || val == CPUFREQ_CREATE_POLICY) {
-		update_min_max_capacity();
+		walt_update_min_max_capacity();
 		return 0;
 	}
 
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 78c0e04..aa25aac 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -815,7 +815,8 @@
 	/* Convert (if necessary) to absolute time */
 	if (flags != TIMER_ABSTIME) {
 		ktime_t now = alarm_bases[type].gettime();
-		exp = ktime_add(now, exp);
+
+		exp = ktime_add_safe(now, exp);
 	}
 
 	if (alarmtimer_do_nsleep(&alarm, exp))
diff --git a/kernel/time/time.c b/kernel/time/time.c
index 15f3487..9e5ffd1 100644
--- a/kernel/time/time.c
+++ b/kernel/time/time.c
@@ -704,6 +704,16 @@
 #endif
 }
 
+u64 jiffies64_to_nsecs(u64 j)
+{
+#if !(NSEC_PER_SEC % HZ)
+	return (NSEC_PER_SEC / HZ) * j;
+# else
+	return div_u64(j * HZ_TO_NSEC_NUM, HZ_TO_NSEC_DEN);
+#endif
+}
+EXPORT_SYMBOL(jiffies64_to_nsecs);
+
 /**
  * nsecs_to_jiffies64 - Convert nsecs in u64 to jiffies64
  *
diff --git a/kernel/time/timeconst.bc b/kernel/time/timeconst.bc
index c486889..f83bbb8 100644
--- a/kernel/time/timeconst.bc
+++ b/kernel/time/timeconst.bc
@@ -98,6 +98,12 @@
 		print "#define HZ_TO_USEC_DEN\t\t", hz/cd, "\n"
 		print "#define USEC_TO_HZ_NUM\t\t", hz/cd, "\n"
 		print "#define USEC_TO_HZ_DEN\t\t", 1000000/cd, "\n"
+
+		cd=gcd(hz,1000000000)
+		print "#define HZ_TO_NSEC_NUM\t\t", 1000000000/cd, "\n"
+		print "#define HZ_TO_NSEC_DEN\t\t", hz/cd, "\n"
+		print "#define NSEC_TO_HZ_NUM\t\t", hz/cd, "\n"
+		print "#define NSEC_TO_HZ_DEN\t\t", 1000000000/cd, "\n"
 		print "\n"
 
 		print "#endif /* KERNEL_TIMECONST_H */\n"
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index dc29b60..f316e90 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1504,6 +1504,8 @@
 	tmp_iter_page = first_page;
 
 	do {
+		cond_resched();
+
 		to_remove_page = tmp_iter_page;
 		rb_inc_page(cpu_buffer, &tmp_iter_page);
 
diff --git a/lib/klist.c b/lib/klist.c
index 0507fa5..f6b5478 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -336,8 +336,9 @@
 	void (*put)(struct klist_node *) = i->i_klist->put;
 	struct klist_node *last = i->i_cur;
 	struct klist_node *prev;
+	unsigned long flags;
 
-	spin_lock(&i->i_klist->k_lock);
+	spin_lock_irqsave(&i->i_klist->k_lock, flags);
 
 	if (last) {
 		prev = to_klist_node(last->n_node.prev);
@@ -356,7 +357,7 @@
 		prev = to_klist_node(prev->n_node.prev);
 	}
 
-	spin_unlock(&i->i_klist->k_lock);
+	spin_unlock_irqrestore(&i->i_klist->k_lock, flags);
 
 	if (put && last)
 		put(last);
@@ -377,8 +378,9 @@
 	void (*put)(struct klist_node *) = i->i_klist->put;
 	struct klist_node *last = i->i_cur;
 	struct klist_node *next;
+	unsigned long flags;
 
-	spin_lock(&i->i_klist->k_lock);
+	spin_lock_irqsave(&i->i_klist->k_lock, flags);
 
 	if (last) {
 		next = to_klist_node(last->n_node.next);
@@ -397,7 +399,7 @@
 		next = to_klist_node(next->n_node.next);
 	}
 
-	spin_unlock(&i->i_klist->k_lock);
+	spin_unlock_irqrestore(&i->i_klist->k_lock, flags);
 
 	if (put && last)
 		put(last);
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 101dac0..fdffd62 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -251,8 +251,10 @@
 	if (!new_tbl)
 		return 0;
 
-	for (old_hash = 0; old_hash < old_tbl->size; old_hash++)
+	for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
 		rhashtable_rehash_chain(ht, old_hash);
+		cond_resched();
+	}
 
 	/* Publish the new table pointer. */
 	rcu_assign_pointer(ht->tbl, new_tbl);
@@ -993,6 +995,7 @@
 		for (i = 0; i < tbl->size; i++) {
 			struct rhash_head *pos, *next;
 
+			cond_resched();
 			for (pos = rht_dereference(tbl->buckets[i], ht),
 			     next = !rht_is_a_nulls(pos) ?
 					rht_dereference(pos->next, ht) : NULL;
diff --git a/mm/madvise.c b/mm/madvise.c
index ee7ad9b..b753f02 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -83,7 +83,7 @@
 		new_flags |= VM_DONTDUMP;
 		break;
 	case MADV_DODUMP:
-		if (new_flags & VM_SPECIAL) {
+		if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL) {
 			error = -EINVAL;
 			goto out;
 		}
diff --git a/mm/shmem.c b/mm/shmem.c
index 61a39aa..290c5b8 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2160,6 +2160,8 @@
 			mpol_shared_policy_init(&info->policy, NULL);
 			break;
 		}
+
+		lockdep_annotate_inode_mutex_key(inode);
 	} else
 		shmem_free_inode(sb);
 	return inode;
diff --git a/mm/slub.c b/mm/slub.c
index 1a14283..b5c9fde 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1806,7 +1806,7 @@
 {
 	struct page *page, *page2;
 	void *object = NULL;
-	int available = 0;
+	unsigned int available = 0;
 	int objects;
 
 	/*
@@ -4900,10 +4900,10 @@
 static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
 				 size_t length)
 {
-	unsigned long objects;
+	unsigned int objects;
 	int err;
 
-	err = kstrtoul(buf, 10, &objects);
+	err = kstrtouint(buf, 10, &objects);
 	if (err)
 		return err;
 	if (objects && !kmem_cache_has_cpu_partial(s))
diff --git a/mm/vmstat.c b/mm/vmstat.c
index fdd884a..727bc07 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1079,6 +1079,9 @@
 #ifdef CONFIG_SMP
 	"nr_tlb_remote_flush",
 	"nr_tlb_remote_flush_received",
+#else
+	"", /* nr_tlb_remote_flush */
+	"", /* nr_tlb_remote_flush_received */
 #endif /* CONFIG_SMP */
 	"nr_tlb_local_flush_all",
 	"nr_tlb_local_flush_one",
@@ -1087,7 +1090,6 @@
 #ifdef CONFIG_DEBUG_VM_VMACACHE
 	"vmacache_find_calls",
 	"vmacache_find_hits",
-	"vmacache_full_flushes",
 #endif
 #ifdef CONFIG_SPECULATIVE_PAGE_FAULT
 	"speculative_pgfault"
diff --git a/net/6lowpan/iphc.c b/net/6lowpan/iphc.c
index 79f1fa2..23654f1 100644
--- a/net/6lowpan/iphc.c
+++ b/net/6lowpan/iphc.c
@@ -745,6 +745,7 @@
 		hdr.hop_limit, &hdr.daddr);
 
 	skb_push(skb, sizeof(hdr));
+	skb_reset_mac_header(skb);
 	skb_reset_network_header(skb);
 	skb_copy_to_linear_data(skb, &hdr, sizeof(hdr));
 
diff --git a/net/bridge/netfilter/ebt_arpreply.c b/net/bridge/netfilter/ebt_arpreply.c
index 070cf13..f2660c1 100644
--- a/net/bridge/netfilter/ebt_arpreply.c
+++ b/net/bridge/netfilter/ebt_arpreply.c
@@ -67,6 +67,9 @@
 	if (e->ethproto != htons(ETH_P_ARP) ||
 	    e->invflags & EBT_IPROTO)
 		return -EINVAL;
+	if (ebt_invalid_target(info->target))
+		return -EINVAL;
+
 	return 0;
 }
 
diff --git a/net/core/dev.c b/net/core/dev.c
index df92fb8..62dd763 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1666,6 +1666,28 @@
 }
 EXPORT_SYMBOL(call_netdevice_notifiers);
 
+/**
+ *	call_netdevice_notifiers_mtu - call all network notifier blocks
+ *	@val: value passed unmodified to notifier function
+ *	@dev: net_device pointer passed unmodified to notifier function
+ *	@arg: additional u32 argument passed to the notifier function
+ *
+ *	Call all network notifier blocks.  Parameters and return value
+ *	are as for raw_notifier_call_chain().
+ */
+static int call_netdevice_notifiers_mtu(unsigned long val,
+					struct net_device *dev, u32 arg)
+{
+	struct netdev_notifier_info_ext info = {
+		.info.dev = dev,
+		.ext.mtu = arg,
+	};
+
+	BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0);
+
+	return call_netdevice_notifiers_info(val, dev, &info.info);
+}
+
 #ifdef CONFIG_NET_INGRESS
 static struct static_key ingress_needed __read_mostly;
 
@@ -6721,14 +6743,16 @@
 	err = __dev_set_mtu(dev, new_mtu);
 
 	if (!err) {
-		err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
+		err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
+						   orig_mtu);
 		err = notifier_to_errno(err);
 		if (err) {
 			/* setting mtu back and notifying everyone again,
 			 * so that they have a chance to revert changes.
 			 */
 			__dev_set_mtu(dev, orig_mtu);
-			call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
+			call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
+						     new_mtu);
 		}
 	}
 	return err;
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 340a3db..2cfbe3f 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1147,6 +1147,12 @@
 		lladdr = neigh->ha;
 	}
 
+	/* Update the confirmed timestamp for the neighbour entry after we
+	 * receive an ARP packet, even if it doesn't change the IP-to-MAC binding.
+	 */
+	if (new & NUD_CONNECTED)
+		neigh->confirmed = jiffies;
+
 	/* If entry was valid and address is not changed,
 	   do not change entry state, if new one is STALE.
 	 */
@@ -1168,15 +1174,12 @@
 		}
 	}
 
-	/* Update timestamps only once we know we will make a change to the
+	/* Update timestamp only once we know we will make a change to the
 	 * neighbour entry. Otherwise we risk to move the locktime window with
 	 * noop updates and ignore relevant ARP updates.
 	 */
-	if (new != old || lladdr != neigh->ha) {
-		if (new & NUD_CONNECTED)
-			neigh->confirmed = jiffies;
+	if (new != old || lladdr != neigh->ha)
 		neigh->updated = jiffies;
-	}
 
 	if (new != old) {
 		neigh_del_timer(neigh);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 194e844..189082d 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2368,6 +2368,12 @@
 	else if (ops->get_num_rx_queues)
 		num_rx_queues = ops->get_num_rx_queues();
 
+	if (num_tx_queues < 1 || num_tx_queues > 4096)
+		return ERR_PTR(-EINVAL);
+
+	if (num_rx_queues < 1 || num_rx_queues > 4096)
+		return ERR_PTR(-EINVAL);
+
 	err = -ENOMEM;
 	dev = alloc_netdev_mqs(ops->priv_size, ifname, name_assign_type,
 			       ops->setup, num_tx_queues, num_rx_queues);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index b6a319c..cfadc0a 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1584,6 +1584,20 @@
 }
 EXPORT_SYMBOL(___pskb_trim);
 
+/* Note : use pskb_trim_rcsum() instead of calling this directly
+ */
+int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len)
+{
+	if (skb->ip_summed == CHECKSUM_COMPLETE) {
+		int delta = skb->len - len;
+
+		skb->csum = csum_sub(skb->csum,
+				     skb_checksum(skb, len, delta, 0));
+	}
+	return __pskb_trim(skb, len);
+}
+EXPORT_SYMBOL(pskb_trim_rcsum_slow);
+
 /**
  *	__pskb_pull_tail - advance tail of skb header
  *	@skb: buffer to reallocate
@@ -2431,20 +2445,27 @@
 /**
  *	skb_rbtree_purge - empty a skb rbtree
  *	@root: root of the rbtree to empty
+ *	Return value: the sum of truesizes of all purged skbs.
  *
  *	Delete all buffers on an &sk_buff rbtree. Each buffer is removed from
  *	the list and one reference dropped. This function does not take
  *	any lock. Synchronization should be handled by the caller (e.g., TCP
  *	out-of-order queue is protected by the socket lock).
  */
-void skb_rbtree_purge(struct rb_root *root)
+unsigned int skb_rbtree_purge(struct rb_root *root)
 {
-	struct sk_buff *skb, *next;
+	struct rb_node *p = rb_first(root);
+	unsigned int sum = 0;
 
-	rbtree_postorder_for_each_entry_safe(skb, next, root, rbnode)
+	while (p) {
+		struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
+
+		p = rb_next(p);
+		rb_erase(&skb->rbnode, root);
+		sum += skb->truesize;
 		kfree_skb(skb);
-
-	*root = RB_ROOT;
+	}
+	return sum;
 }
 
 /**
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 4a05d78..84ff43a 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -605,11 +605,13 @@
 	if (sk->sk_state == DCCP_LISTEN) {
 		if (dh->dccph_type == DCCP_PKT_REQUEST) {
 			/* It is possible that we process SYN packets from backlog,
-			 * so we need to make sure to disable BH right there.
+			 * so we need to make sure to disable BH and RCU right there.
 			 */
+			rcu_read_lock();
 			local_bh_disable();
 			acceptable = inet_csk(sk)->icsk_af_ops->conn_request(sk, skb) >= 0;
 			local_bh_enable();
+			rcu_read_unlock();
 			if (!acceptable)
 				return 1;
 			consume_skb(skb);
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 6697b18..28ad6f1 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -493,9 +493,11 @@
 
 		dh->dccph_checksum = dccp_v4_csum_finish(skb, ireq->ir_loc_addr,
 							      ireq->ir_rmt_addr);
+		rcu_read_lock();
 		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
 					    ireq->ir_rmt_addr,
-					    ireq_opt_deref(ireq));
+					    rcu_dereference(ireq->ireq_opt));
+		rcu_read_unlock();
 		err = net_xmit_eval(err);
 	}
 
diff --git a/net/ieee802154/6lowpan/6lowpan_i.h b/net/ieee802154/6lowpan/6lowpan_i.h
index 5ac7789..3bfec47 100644
--- a/net/ieee802154/6lowpan/6lowpan_i.h
+++ b/net/ieee802154/6lowpan/6lowpan_i.h
@@ -16,37 +16,19 @@
 #define LOWPAN_DISPATCH_FRAG1           0xc0
 #define LOWPAN_DISPATCH_FRAGN           0xe0
 
-struct lowpan_create_arg {
+struct frag_lowpan_compare_key {
 	u16 tag;
 	u16 d_size;
-	const struct ieee802154_addr *src;
-	const struct ieee802154_addr *dst;
+	const struct ieee802154_addr src;
+	const struct ieee802154_addr dst;
 };
 
-/* Equivalent of ipv4 struct ip
+/* Equivalent of ipv4 struct ipq
  */
 struct lowpan_frag_queue {
 	struct inet_frag_queue	q;
-
-	u16			tag;
-	u16			d_size;
-	struct ieee802154_addr	saddr;
-	struct ieee802154_addr	daddr;
 };
 
-static inline u32 ieee802154_addr_hash(const struct ieee802154_addr *a)
-{
-	switch (a->mode) {
-	case IEEE802154_ADDR_LONG:
-		return (((__force u64)a->extended_addr) >> 32) ^
-			(((__force u64)a->extended_addr) & 0xffffffff);
-	case IEEE802154_ADDR_SHORT:
-		return (__force u32)(a->short_addr + (a->pan_id << 16));
-	default:
-		return 0;
-	}
-}
-
 int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type);
 void lowpan_net_frag_exit(void);
 int lowpan_net_frag_init(void);
diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
index f85b08b..6fca755 100644
--- a/net/ieee802154/6lowpan/reassembly.c
+++ b/net/ieee802154/6lowpan/reassembly.c
@@ -37,47 +37,15 @@
 static int lowpan_frag_reasm(struct lowpan_frag_queue *fq,
 			     struct sk_buff *prev, struct net_device *ldev);
 
-static unsigned int lowpan_hash_frag(u16 tag, u16 d_size,
-				     const struct ieee802154_addr *saddr,
-				     const struct ieee802154_addr *daddr)
-{
-	net_get_random_once(&lowpan_frags.rnd, sizeof(lowpan_frags.rnd));
-	return jhash_3words(ieee802154_addr_hash(saddr),
-			    ieee802154_addr_hash(daddr),
-			    (__force u32)(tag + (d_size << 16)),
-			    lowpan_frags.rnd);
-}
-
-static unsigned int lowpan_hashfn(const struct inet_frag_queue *q)
-{
-	const struct lowpan_frag_queue *fq;
-
-	fq = container_of(q, struct lowpan_frag_queue, q);
-	return lowpan_hash_frag(fq->tag, fq->d_size, &fq->saddr, &fq->daddr);
-}
-
-static bool lowpan_frag_match(const struct inet_frag_queue *q, const void *a)
-{
-	const struct lowpan_frag_queue *fq;
-	const struct lowpan_create_arg *arg = a;
-
-	fq = container_of(q, struct lowpan_frag_queue, q);
-	return	fq->tag == arg->tag && fq->d_size == arg->d_size &&
-		ieee802154_addr_equal(&fq->saddr, arg->src) &&
-		ieee802154_addr_equal(&fq->daddr, arg->dst);
-}
-
 static void lowpan_frag_init(struct inet_frag_queue *q, const void *a)
 {
-	const struct lowpan_create_arg *arg = a;
+	const struct frag_lowpan_compare_key *key = a;
 	struct lowpan_frag_queue *fq;
 
 	fq = container_of(q, struct lowpan_frag_queue, q);
 
-	fq->tag = arg->tag;
-	fq->d_size = arg->d_size;
-	fq->saddr = *arg->src;
-	fq->daddr = *arg->dst;
+	BUILD_BUG_ON(sizeof(*key) > sizeof(q->key));
+	memcpy(&q->key, key, sizeof(*key));
 }
 
 static void lowpan_frag_expire(unsigned long data)
@@ -93,10 +61,10 @@
 	if (fq->q.flags & INET_FRAG_COMPLETE)
 		goto out;
 
-	inet_frag_kill(&fq->q, &lowpan_frags);
+	inet_frag_kill(&fq->q);
 out:
 	spin_unlock(&fq->q.lock);
-	inet_frag_put(&fq->q, &lowpan_frags);
+	inet_frag_put(&fq->q);
 }
 
 static inline struct lowpan_frag_queue *
@@ -104,25 +72,20 @@
 	const struct ieee802154_addr *src,
 	const struct ieee802154_addr *dst)
 {
-	struct inet_frag_queue *q;
-	struct lowpan_create_arg arg;
-	unsigned int hash;
 	struct netns_ieee802154_lowpan *ieee802154_lowpan =
 		net_ieee802154_lowpan(net);
+	struct frag_lowpan_compare_key key = {
+		.tag = cb->d_tag,
+		.d_size = cb->d_size,
+		.src = *src,
+		.dst = *dst,
+	};
+	struct inet_frag_queue *q;
 
-	arg.tag = cb->d_tag;
-	arg.d_size = cb->d_size;
-	arg.src = src;
-	arg.dst = dst;
-
-	hash = lowpan_hash_frag(cb->d_tag, cb->d_size, src, dst);
-
-	q = inet_frag_find(&ieee802154_lowpan->frags,
-			   &lowpan_frags, &arg, hash);
-	if (IS_ERR_OR_NULL(q)) {
-		inet_frag_maybe_warn_overflow(q, pr_fmt());
+	q = inet_frag_find(&ieee802154_lowpan->frags, &key);
+	if (!q)
 		return NULL;
-	}
+
 	return container_of(q, struct lowpan_frag_queue, q);
 }
 
@@ -229,7 +192,7 @@
 	struct sk_buff *fp, *head = fq->q.fragments;
 	int sum_truesize;
 
-	inet_frag_kill(&fq->q, &lowpan_frags);
+	inet_frag_kill(&fq->q);
 
 	/* Make the one we just received the head. */
 	if (prev) {
@@ -437,7 +400,7 @@
 		ret = lowpan_frag_queue(fq, skb, frag_type);
 		spin_unlock(&fq->q.lock);
 
-		inet_frag_put(&fq->q, &lowpan_frags);
+		inet_frag_put(&fq->q);
 		return ret;
 	}
 
@@ -447,24 +410,22 @@
 }
 
 #ifdef CONFIG_SYSCTL
-static int zero;
 
 static struct ctl_table lowpan_frags_ns_ctl_table[] = {
 	{
 		.procname	= "6lowpanfrag_high_thresh",
 		.data		= &init_net.ieee802154_lowpan.frags.high_thresh,
-		.maxlen		= sizeof(int),
+		.maxlen		= sizeof(unsigned long),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec_minmax,
+		.proc_handler	= proc_doulongvec_minmax,
 		.extra1		= &init_net.ieee802154_lowpan.frags.low_thresh
 	},
 	{
 		.procname	= "6lowpanfrag_low_thresh",
 		.data		= &init_net.ieee802154_lowpan.frags.low_thresh,
-		.maxlen		= sizeof(int),
+		.maxlen		= sizeof(unsigned long),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &zero,
+		.proc_handler	= proc_doulongvec_minmax,
 		.extra2		= &init_net.ieee802154_lowpan.frags.high_thresh
 	},
 	{
@@ -580,14 +541,20 @@
 {
 	struct netns_ieee802154_lowpan *ieee802154_lowpan =
 		net_ieee802154_lowpan(net);
+	int res;
 
 	ieee802154_lowpan->frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
 	ieee802154_lowpan->frags.low_thresh = IPV6_FRAG_LOW_THRESH;
 	ieee802154_lowpan->frags.timeout = IPV6_FRAG_TIMEOUT;
+	ieee802154_lowpan->frags.f = &lowpan_frags;
 
-	inet_frags_init_net(&ieee802154_lowpan->frags);
-
-	return lowpan_frags_ns_sysctl_register(net);
+	res = inet_frags_init_net(&ieee802154_lowpan->frags);
+	if (res < 0)
+		return res;
+	res = lowpan_frags_ns_sysctl_register(net);
+	if (res < 0)
+		inet_frags_exit_net(&ieee802154_lowpan->frags);
+	return res;
 }
 
 static void __net_exit lowpan_frags_exit_net(struct net *net)
@@ -596,7 +563,7 @@
 		net_ieee802154_lowpan(net);
 
 	lowpan_frags_ns_sysctl_unregister(net);
-	inet_frags_exit_net(&ieee802154_lowpan->frags, &lowpan_frags);
+	inet_frags_exit_net(&ieee802154_lowpan->frags);
 }
 
 static struct pernet_operations lowpan_frags_ops = {
@@ -604,32 +571,63 @@
 	.exit = lowpan_frags_exit_net,
 };
 
+static u32 lowpan_key_hashfn(const void *data, u32 len, u32 seed)
+{
+	return jhash2(data,
+		      sizeof(struct frag_lowpan_compare_key) / sizeof(u32), seed);
+}
+
+static u32 lowpan_obj_hashfn(const void *data, u32 len, u32 seed)
+{
+	const struct inet_frag_queue *fq = data;
+
+	return jhash2((const u32 *)&fq->key,
+		      sizeof(struct frag_lowpan_compare_key) / sizeof(u32), seed);
+}
+
+static int lowpan_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
+{
+	const struct frag_lowpan_compare_key *key = arg->key;
+	const struct inet_frag_queue *fq = ptr;
+
+	return !!memcmp(&fq->key, key, sizeof(*key));
+}
+
+static const struct rhashtable_params lowpan_rhash_params = {
+	.head_offset		= offsetof(struct inet_frag_queue, node),
+	.hashfn			= lowpan_key_hashfn,
+	.obj_hashfn		= lowpan_obj_hashfn,
+	.obj_cmpfn		= lowpan_obj_cmpfn,
+	.automatic_shrinking	= true,
+};
+
 int __init lowpan_net_frag_init(void)
 {
 	int ret;
 
+	lowpan_frags.constructor = lowpan_frag_init;
+	lowpan_frags.destructor = NULL;
+	lowpan_frags.qsize = sizeof(struct frag_queue);
+	lowpan_frags.frag_expire = lowpan_frag_expire;
+	lowpan_frags.frags_cache_name = lowpan_frags_cache_name;
+	lowpan_frags.rhash_params = lowpan_rhash_params;
+	ret = inet_frags_init(&lowpan_frags);
+	if (ret)
+		goto out;
+
 	ret = lowpan_frags_sysctl_register();
 	if (ret)
-		return ret;
+		goto err_sysctl;
 
 	ret = register_pernet_subsys(&lowpan_frags_ops);
 	if (ret)
 		goto err_pernet;
-
-	lowpan_frags.hashfn = lowpan_hashfn;
-	lowpan_frags.constructor = lowpan_frag_init;
-	lowpan_frags.destructor = NULL;
-	lowpan_frags.qsize = sizeof(struct frag_queue);
-	lowpan_frags.match = lowpan_frag_match;
-	lowpan_frags.frag_expire = lowpan_frag_expire;
-	lowpan_frags.frags_cache_name = lowpan_frags_cache_name;
-	ret = inet_frags_init(&lowpan_frags);
-	if (ret)
-		goto err_pernet;
-
+out:
 	return ret;
 err_pernet:
 	lowpan_frags_sysctl_unregister();
+err_sysctl:
+	inet_frags_fini(&lowpan_frags);
 	return ret;
 }
 
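The 6LoWPAN reassembly rework above stores the whole compare key inside the queue (the constructor memcpy()s it into q->key) and then hashes it word-by-word with jhash2() and compares it byte-by-byte with memcmp() in the rhashtable callbacks. That only works if the key struct is a whole number of 32-bit words and every byte of it, padding included, is initialized. The standalone sketch below illustrates those two constraints with a hypothetical demo_key layout; it is not the kernel structure, just a model of the assumption.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical lookup key, standing in for frag_lowpan_compare_key:
 * hashed word-by-word (as jhash2 does) and compared with memcmp, so
 * its size must be a multiple of 4 and every byte must be initialized.
 */
struct demo_key {
	uint16_t tag;
	uint16_t d_size;
	uint32_t src;
	uint32_t dst;
};

int main(void)
{
	struct demo_key a, b;

	_Static_assert(sizeof(struct demo_key) % sizeof(uint32_t) == 0,
		       "key must be a whole number of 32-bit words");

	/* Zero first so memcmp() never sees indeterminate padding bytes. */
	memset(&a, 0, sizeof(a));
	memset(&b, 0, sizeof(b));
	a.tag = b.tag = 7;
	a.d_size = b.d_size = 1280;
	a.src = b.src = 0x1111;
	a.dst = b.dst = 0x2222;

	printf("keys equal: %d\n", memcmp(&a, &b, sizeof(a)) == 0);
	return 0;
}
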
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 90c91a7..275ef13 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1315,6 +1315,7 @@
 		if (encap)
 			skb_reset_inner_headers(skb);
 		skb->network_header = (u8 *)iph - skb->head;
+		skb_reset_mac_len(skb);
 	} while ((skb = skb->next));
 
 out:
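The GSO completion loop above rewrites each segment's network header offset, so the cached MAC-header length has to be recomputed per segment as well; that is all skb_reset_mac_len() does. Paraphrased from include/linux/skbuff.h, it derives mac_len from the two header offsets:

/* Paraphrased helper: mac_len is the distance between the MAC header
 * and the (just updated) network header.
 */
static inline void skb_reset_mac_len(struct sk_buff *skb)
{
	skb->mac_len = skb->network_header - skb->mac_header;
}
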
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 78ee2fc..8e556fa 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -1172,7 +1172,8 @@
 static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
 {
 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
-	struct netdev_notifier_changeupper_info *info;
+	struct netdev_notifier_changeupper_info *upper_info = ptr;
+	struct netdev_notifier_info_ext *info_ext = ptr;
 	struct in_device *in_dev;
 	struct net *net = dev_net(dev);
 	unsigned int flags;
@@ -1207,16 +1208,19 @@
 			fib_sync_up(dev, RTNH_F_LINKDOWN);
 		else
 			fib_sync_down_dev(dev, event, false);
-		/* fall through */
+		rt_cache_flush(net);
+		break;
 	case NETDEV_CHANGEMTU:
+		fib_sync_mtu(dev, info_ext->ext.mtu);
 		rt_cache_flush(net);
 		break;
 	case NETDEV_CHANGEUPPER:
-		info = ptr;
+		upper_info = ptr;
 		/* flush all routes if dev is linked to or unlinked from
 		 * an L3 master device (e.g., VRF)
 		 */
-		if (info->upper_dev && netif_is_l3_master(info->upper_dev))
+		if (upper_info->upper_dev &&
+		    netif_is_l3_master(upper_info->upper_dev))
 			fib_disable_ip(dev, NETDEV_DOWN, true);
 		break;
 	}
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index a88dab3..90c6540 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1377,6 +1377,56 @@
 	return ret;
 }
 
+/* Update the PMTU of exceptions when:
+ * - the new MTU of the first hop becomes smaller than the PMTU
+ * - the old MTU was the same as the PMTU, and it limited discovery of
+ *   larger MTUs on the path. With that limit raised, we can now
+ *   discover larger MTUs
+ * A special case is locked exceptions, for which the PMTU is smaller
+ * than the minimal accepted PMTU:
+ * - if the new MTU is greater than the PMTU, don't make any change
+ * - otherwise, unlock and set PMTU
+ */
+static void nh_update_mtu(struct fib_nh *nh, u32 new, u32 orig)
+{
+	struct fnhe_hash_bucket *bucket;
+	int i;
+
+	bucket = rcu_dereference_protected(nh->nh_exceptions, 1);
+	if (!bucket)
+		return;
+
+	for (i = 0; i < FNHE_HASH_SIZE; i++) {
+		struct fib_nh_exception *fnhe;
+
+		for (fnhe = rcu_dereference_protected(bucket[i].chain, 1);
+		     fnhe;
+		     fnhe = rcu_dereference_protected(fnhe->fnhe_next, 1)) {
+			if (fnhe->fnhe_mtu_locked) {
+				if (new <= fnhe->fnhe_pmtu) {
+					fnhe->fnhe_pmtu = new;
+					fnhe->fnhe_mtu_locked = false;
+				}
+			} else if (new < fnhe->fnhe_pmtu ||
+				   orig == fnhe->fnhe_pmtu) {
+				fnhe->fnhe_pmtu = new;
+			}
+		}
+	}
+}
+
+void fib_sync_mtu(struct net_device *dev, u32 orig_mtu)
+{
+	unsigned int hash = fib_devindex_hashfn(dev->ifindex);
+	struct hlist_head *head = &fib_info_devhash[hash];
+	struct fib_nh *nh;
+
+	hlist_for_each_entry(nh, head, nh_hash) {
+		if (nh->nh_dev == dev)
+			nh_update_mtu(nh, dev->mtu, orig_mtu);
+	}
+}
+
 /* Event              force Flags           Description
  * NETDEV_CHANGE      0     LINKDOWN        Carrier OFF, not for scope host
  * NETDEV_DOWN        0     LINKDOWN|DEAD   Link down, not for scope host
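The rules in the comment above nh_update_mtu() are easiest to verify against concrete values. The sketch below is a self-contained userspace model of that decision logic for a single exception entry, not the kernel function itself:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Standalone model of the nh_update_mtu() rules for one exception. */
struct fnhe_model {
	uint32_t pmtu;
	bool     locked;
};

static void update(struct fnhe_model *e, uint32_t new, uint32_t orig)
{
	if (e->locked) {
		/* Locked entries only change when the new MTU is not larger. */
		if (new <= e->pmtu) {
			e->pmtu = new;
			e->locked = false;
		}
	} else if (new < e->pmtu || orig == e->pmtu) {
		/* The first hop shrank below the PMTU, or it was the limiting
		 * factor and has now been raised: adopt the new value.
		 */
		e->pmtu = new;
	}
}

int main(void)
{
	struct fnhe_model e = { .pmtu = 1500, .locked = false };

	update(&e, 1400, 1500);	/* device MTU lowered: PMTU follows */
	printf("after shrink: %u\n", e.pmtu);	/* prints 1400 */

	update(&e, 9000, 1400);	/* old MTU == PMTU: allow rediscovery */
	printf("after raise:  %u\n", e.pmtu);	/* prints 9000 */
	return 0;
}
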
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 11558ca..9453180 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -417,7 +417,8 @@
 	struct ip_options_rcu *opt;
 	struct rtable *rt;
 
-	opt = ireq_opt_deref(ireq);
+	rcu_read_lock();
+	opt = rcu_dereference(ireq->ireq_opt);
 
 	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
 			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
@@ -431,11 +432,13 @@
 		goto no_route;
 	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
 		goto route_err;
+	rcu_read_unlock();
 	return &rt->dst;
 
 route_err:
 	ip_rt_put(rt);
 no_route:
+	rcu_read_unlock();
 	__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
 	return NULL;
 }
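This hunk (together with the matching tcp_ipv4.c hunk further down) replaces the plain dereference of ireq->ireq_opt with an explicit RCU read-side section, so the options blob cannot be freed while the route lookup is still using it. A sketch of the pattern, not tied to this exact call site:

	/* Sketch: a pointer obtained via rcu_dereference() is only valid
	 * until rcu_read_unlock(), so the critical section must cover
	 * every use of it, including the error paths.
	 */
	rcu_read_lock();
	opt = rcu_dereference(ireq->ireq_opt);
	/* ... all uses of 'opt' happen here ... */
	rcu_read_unlock();
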
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index f8b41aa..8323d33 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -25,12 +25,6 @@
 #include <net/inet_frag.h>
 #include <net/inet_ecn.h>
 
-#define INETFRAGS_EVICT_BUCKETS   128
-#define INETFRAGS_EVICT_MAX	  512
-
-/* don't rebuild inetfrag table with new secret more often than this */
-#define INETFRAGS_MIN_REBUILD_INTERVAL (5 * HZ)
-
 /* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
  * Value : 0xff if frame should be dropped.
  *         0 or INET_ECN_CE value, to be ORed in to final iph->tos field
@@ -52,157 +46,8 @@
 };
 EXPORT_SYMBOL(ip_frag_ecn_table);
 
-static unsigned int
-inet_frag_hashfn(const struct inet_frags *f, const struct inet_frag_queue *q)
-{
-	return f->hashfn(q) & (INETFRAGS_HASHSZ - 1);
-}
-
-static bool inet_frag_may_rebuild(struct inet_frags *f)
-{
-	return time_after(jiffies,
-	       f->last_rebuild_jiffies + INETFRAGS_MIN_REBUILD_INTERVAL);
-}
-
-static void inet_frag_secret_rebuild(struct inet_frags *f)
-{
-	int i;
-
-	write_seqlock_bh(&f->rnd_seqlock);
-
-	if (!inet_frag_may_rebuild(f))
-		goto out;
-
-	get_random_bytes(&f->rnd, sizeof(u32));
-
-	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
-		struct inet_frag_bucket *hb;
-		struct inet_frag_queue *q;
-		struct hlist_node *n;
-
-		hb = &f->hash[i];
-		spin_lock(&hb->chain_lock);
-
-		hlist_for_each_entry_safe(q, n, &hb->chain, list) {
-			unsigned int hval = inet_frag_hashfn(f, q);
-
-			if (hval != i) {
-				struct inet_frag_bucket *hb_dest;
-
-				hlist_del(&q->list);
-
-				/* Relink to new hash chain. */
-				hb_dest = &f->hash[hval];
-
-				/* This is the only place where we take
-				 * another chain_lock while already holding
-				 * one.  As this will not run concurrently,
-				 * we cannot deadlock on hb_dest lock below, if its
-				 * already locked it will be released soon since
-				 * other caller cannot be waiting for hb lock
-				 * that we've taken above.
-				 */
-				spin_lock_nested(&hb_dest->chain_lock,
-						 SINGLE_DEPTH_NESTING);
-				hlist_add_head(&q->list, &hb_dest->chain);
-				spin_unlock(&hb_dest->chain_lock);
-			}
-		}
-		spin_unlock(&hb->chain_lock);
-	}
-
-	f->rebuild = false;
-	f->last_rebuild_jiffies = jiffies;
-out:
-	write_sequnlock_bh(&f->rnd_seqlock);
-}
-
-static bool inet_fragq_should_evict(const struct inet_frag_queue *q)
-{
-	if (!hlist_unhashed(&q->list_evictor))
-		return false;
-
-	return q->net->low_thresh == 0 ||
-	       frag_mem_limit(q->net) >= q->net->low_thresh;
-}
-
-static unsigned int
-inet_evict_bucket(struct inet_frags *f, struct inet_frag_bucket *hb)
-{
-	struct inet_frag_queue *fq;
-	struct hlist_node *n;
-	unsigned int evicted = 0;
-	HLIST_HEAD(expired);
-
-	spin_lock(&hb->chain_lock);
-
-	hlist_for_each_entry_safe(fq, n, &hb->chain, list) {
-		if (!inet_fragq_should_evict(fq))
-			continue;
-
-		if (!del_timer(&fq->timer))
-			continue;
-
-		hlist_add_head(&fq->list_evictor, &expired);
-		++evicted;
-	}
-
-	spin_unlock(&hb->chain_lock);
-
-	hlist_for_each_entry_safe(fq, n, &expired, list_evictor)
-		f->frag_expire((unsigned long) fq);
-
-	return evicted;
-}
-
-static void inet_frag_worker(struct work_struct *work)
-{
-	unsigned int budget = INETFRAGS_EVICT_BUCKETS;
-	unsigned int i, evicted = 0;
-	struct inet_frags *f;
-
-	f = container_of(work, struct inet_frags, frags_work);
-
-	BUILD_BUG_ON(INETFRAGS_EVICT_BUCKETS >= INETFRAGS_HASHSZ);
-
-	local_bh_disable();
-
-	for (i = ACCESS_ONCE(f->next_bucket); budget; --budget) {
-		evicted += inet_evict_bucket(f, &f->hash[i]);
-		i = (i + 1) & (INETFRAGS_HASHSZ - 1);
-		if (evicted > INETFRAGS_EVICT_MAX)
-			break;
-	}
-
-	f->next_bucket = i;
-
-	local_bh_enable();
-
-	if (f->rebuild && inet_frag_may_rebuild(f))
-		inet_frag_secret_rebuild(f);
-}
-
-static void inet_frag_schedule_worker(struct inet_frags *f)
-{
-	if (unlikely(!work_pending(&f->frags_work)))
-		schedule_work(&f->frags_work);
-}
-
 int inet_frags_init(struct inet_frags *f)
 {
-	int i;
-
-	INIT_WORK(&f->frags_work, inet_frag_worker);
-
-	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
-		struct inet_frag_bucket *hb = &f->hash[i];
-
-		spin_lock_init(&hb->chain_lock);
-		INIT_HLIST_HEAD(&hb->chain);
-	}
-
-	seqlock_init(&f->rnd_seqlock);
-	f->last_rebuild_jiffies = 0;
 	f->frags_cachep = kmem_cache_create(f->frags_cache_name, f->qsize, 0, 0,
 					    NULL);
 	if (!f->frags_cachep)
@@ -214,83 +59,75 @@
 
 void inet_frags_fini(struct inet_frags *f)
 {
-	cancel_work_sync(&f->frags_work);
+	/* We must wait that all inet_frag_destroy_rcu() have completed. */
+	rcu_barrier();
+
 	kmem_cache_destroy(f->frags_cachep);
+	f->frags_cachep = NULL;
 }
 EXPORT_SYMBOL(inet_frags_fini);
 
-void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
+static void inet_frags_free_cb(void *ptr, void *arg)
 {
-	unsigned int seq;
-	int i;
+	struct inet_frag_queue *fq = ptr;
 
-	nf->low_thresh = 0;
+	/* If we cannot cancel the timer, this frag_queue is already
+	 * disappearing and we have nothing to do.

+	 * Otherwise, we own a refcount until the end of this function.
+	 */
+	if (!del_timer(&fq->timer))
+		return;
 
-evict_again:
-	local_bh_disable();
-	seq = read_seqbegin(&f->rnd_seqlock);
+	spin_lock_bh(&fq->lock);
+	if (!(fq->flags & INET_FRAG_COMPLETE)) {
+		fq->flags |= INET_FRAG_COMPLETE;
+		atomic_dec(&fq->refcnt);
+	}
+	spin_unlock_bh(&fq->lock);
 
-	for (i = 0; i < INETFRAGS_HASHSZ ; i++)
-		inet_evict_bucket(f, &f->hash[i]);
+	inet_frag_put(fq);
+}
 
-	local_bh_enable();
-	cond_resched();
+void inet_frags_exit_net(struct netns_frags *nf)
+{
+	nf->low_thresh = 0; /* prevent creation of new frags */
 
-	if (read_seqretry(&f->rnd_seqlock, seq) ||
-	    sum_frag_mem_limit(nf))
-		goto evict_again;
+	rhashtable_free_and_destroy(&nf->rhashtable, inet_frags_free_cb, NULL);
 }
 EXPORT_SYMBOL(inet_frags_exit_net);
 
-static struct inet_frag_bucket *
-get_frag_bucket_locked(struct inet_frag_queue *fq, struct inet_frags *f)
-__acquires(hb->chain_lock)
-{
-	struct inet_frag_bucket *hb;
-	unsigned int seq, hash;
-
- restart:
-	seq = read_seqbegin(&f->rnd_seqlock);
-
-	hash = inet_frag_hashfn(f, fq);
-	hb = &f->hash[hash];
-
-	spin_lock(&hb->chain_lock);
-	if (read_seqretry(&f->rnd_seqlock, seq)) {
-		spin_unlock(&hb->chain_lock);
-		goto restart;
-	}
-
-	return hb;
-}
-
-static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
-{
-	struct inet_frag_bucket *hb;
-
-	hb = get_frag_bucket_locked(fq, f);
-	hlist_del(&fq->list);
-	fq->flags |= INET_FRAG_COMPLETE;
-	spin_unlock(&hb->chain_lock);
-}
-
-void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
+void inet_frag_kill(struct inet_frag_queue *fq)
 {
 	if (del_timer(&fq->timer))
 		atomic_dec(&fq->refcnt);
 
 	if (!(fq->flags & INET_FRAG_COMPLETE)) {
-		fq_unlink(fq, f);
+		struct netns_frags *nf = fq->net;
+
+		fq->flags |= INET_FRAG_COMPLETE;
+		rhashtable_remove_fast(&nf->rhashtable, &fq->node, nf->f->rhash_params);
 		atomic_dec(&fq->refcnt);
 	}
 }
 EXPORT_SYMBOL(inet_frag_kill);
 
-void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f)
+static void inet_frag_destroy_rcu(struct rcu_head *head)
+{
+	struct inet_frag_queue *q = container_of(head, struct inet_frag_queue,
+						 rcu);
+	struct inet_frags *f = q->net->f;
+
+	if (f->destructor)
+		f->destructor(q);
+	kmem_cache_free(f->frags_cachep, q);
+}
+
+void inet_frag_destroy(struct inet_frag_queue *q)
 {
 	struct sk_buff *fp;
 	struct netns_frags *nf;
 	unsigned int sum, sum_truesize = 0;
+	struct inet_frags *f;
 
 	WARN_ON(!(q->flags & INET_FRAG_COMPLETE));
 	WARN_ON(del_timer(&q->timer) != 0);
@@ -298,64 +135,35 @@
 	/* Release all fragment data. */
 	fp = q->fragments;
 	nf = q->net;
-	while (fp) {
-		struct sk_buff *xp = fp->next;
+	f = nf->f;
+	if (fp) {
+		do {
+			struct sk_buff *xp = fp->next;
 
-		sum_truesize += fp->truesize;
-		kfree_skb(fp);
-		fp = xp;
+			sum_truesize += fp->truesize;
+			kfree_skb(fp);
+			fp = xp;
+		} while (fp);
+	} else {
+		sum_truesize = inet_frag_rbtree_purge(&q->rb_fragments);
 	}
 	sum = sum_truesize + f->qsize;
 
-	if (f->destructor)
-		f->destructor(q);
-	kmem_cache_free(f->frags_cachep, q);
+	call_rcu(&q->rcu, inet_frag_destroy_rcu);
 
 	sub_frag_mem_limit(nf, sum);
 }
 EXPORT_SYMBOL(inet_frag_destroy);
 
-static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
-						struct inet_frag_queue *qp_in,
-						struct inet_frags *f,
-						void *arg)
-{
-	struct inet_frag_bucket *hb = get_frag_bucket_locked(qp_in, f);
-	struct inet_frag_queue *qp;
-
-#ifdef CONFIG_SMP
-	/* With SMP race we have to recheck hash table, because
-	 * such entry could have been created on other cpu before
-	 * we acquired hash bucket lock.
-	 */
-	hlist_for_each_entry(qp, &hb->chain, list) {
-		if (qp->net == nf && f->match(qp, arg)) {
-			atomic_inc(&qp->refcnt);
-			spin_unlock(&hb->chain_lock);
-			qp_in->flags |= INET_FRAG_COMPLETE;
-			inet_frag_put(qp_in, f);
-			return qp;
-		}
-	}
-#endif
-	qp = qp_in;
-	if (!mod_timer(&qp->timer, jiffies + nf->timeout))
-		atomic_inc(&qp->refcnt);
-
-	atomic_inc(&qp->refcnt);
-	hlist_add_head(&qp->list, &hb->chain);
-
-	spin_unlock(&hb->chain_lock);
-
-	return qp;
-}
-
 static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
 					       struct inet_frags *f,
 					       void *arg)
 {
 	struct inet_frag_queue *q;
 
+	if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh)
+		return NULL;
+
 	q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
 	if (!q)
 		return NULL;
@@ -366,75 +174,50 @@
 
 	setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
 	spin_lock_init(&q->lock);
-	atomic_set(&q->refcnt, 1);
+	atomic_set(&q->refcnt, 3);
 
 	return q;
 }
 
 static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
-						struct inet_frags *f,
 						void *arg)
 {
+	struct inet_frags *f = nf->f;
 	struct inet_frag_queue *q;
+	int err;
 
 	q = inet_frag_alloc(nf, f, arg);
 	if (!q)
 		return NULL;
 
-	return inet_frag_intern(nf, q, f, arg);
-}
+	mod_timer(&q->timer, jiffies + nf->timeout);
 
-struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
-				       struct inet_frags *f, void *key,
-				       unsigned int hash)
-{
-	struct inet_frag_bucket *hb;
-	struct inet_frag_queue *q;
-	int depth = 0;
-
-	if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh) {
-		inet_frag_schedule_worker(f);
+	err = rhashtable_insert_fast(&nf->rhashtable, &q->node,
+				     f->rhash_params);
+	if (err < 0) {
+		q->flags |= INET_FRAG_COMPLETE;
+		inet_frag_kill(q);
+		inet_frag_destroy(q);
 		return NULL;
 	}
+	return q;
+}
+EXPORT_SYMBOL(inet_frag_create);
 
-	if (frag_mem_limit(nf) > nf->low_thresh)
-		inet_frag_schedule_worker(f);
+/* TODO : call from rcu_read_lock() and no longer use refcount_inc_not_zero() */
+struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key)
+{
+	struct inet_frag_queue *fq;
 
-	hash &= (INETFRAGS_HASHSZ - 1);
-	hb = &f->hash[hash];
-
-	spin_lock(&hb->chain_lock);
-	hlist_for_each_entry(q, &hb->chain, list) {
-		if (q->net == nf && f->match(q, key)) {
-			atomic_inc(&q->refcnt);
-			spin_unlock(&hb->chain_lock);
-			return q;
-		}
-		depth++;
+	rcu_read_lock();
+	fq = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params);
+	if (fq) {
+		if (!atomic_inc_not_zero(&fq->refcnt))
+			fq = NULL;
+		rcu_read_unlock();
+		return fq;
 	}
-	spin_unlock(&hb->chain_lock);
-
-	if (depth <= INETFRAGS_MAXDEPTH)
-		return inet_frag_create(nf, f, key);
-
-	if (inet_frag_may_rebuild(f)) {
-		if (!f->rebuild)
-			f->rebuild = true;
-		inet_frag_schedule_worker(f);
-	}
-
-	return ERR_PTR(-ENOBUFS);
+	rcu_read_unlock();
+	return inet_frag_create(nf, key);
 }
 EXPORT_SYMBOL(inet_frag_find);
-
-void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
-				   const char *prefix)
-{
-	static const char msg[] = "inet_frag_find: Fragment hash bucket"
-		" list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
-		". Dropping fragment.\n";
-
-	if (PTR_ERR(q) == -ENOBUFS)
-		net_dbg_ratelimited("%s%s", prefix, msg);
-}
-EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);
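With the per-bucket hash chains and the eviction worker gone, lookup becomes an RCU rhashtable walk and creation becomes allocate-then-publish. Newly allocated queues start with a reference count of 3: one for the hash table, one for the armed timer, and one handed back to the caller. The condensed control flow below uses only calls visible in this patch, with error handling trimmed:

/* Condensed control flow of the rhashtable-based lookup-or-create path. */
struct inet_frag_queue *find_or_create(struct netns_frags *nf, void *key)
{
	struct inet_frag_queue *fq;

	rcu_read_lock();
	fq = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params);
	if (fq && !atomic_inc_not_zero(&fq->refcnt))
		fq = NULL;			/* raced with teardown */
	rcu_read_unlock();
	if (fq)
		return fq;

	/* Not found: allocate with refcnt == 3 (hash table + timer + this
	 * caller), arm the timer, then publish via rhashtable_insert_fast();
	 * on insertion failure the queue is killed and destroyed before
	 * NULL is returned.
	 */
	return inet_frag_create(nf, key);
}
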
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 752711c..cc8c6ac 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -56,27 +56,64 @@
  */
 static const char ip_frag_cache_name[] = "ip4-frags";
 
-struct ipfrag_skb_cb
-{
+/* Use skb->cb to track consecutive/adjacent fragments coming at
+ * the end of the queue. Nodes in the rb-tree queue will
+ * contain "runs" of one or more adjacent fragments.
+ *
+ * Invariants:
+ * - next_frag is NULL at the tail of a "run";
+ * - the head of a "run" has the sum of all fragment lengths in frag_run_len.
+ */
+struct ipfrag_skb_cb {
 	struct inet_skb_parm	h;
-	int			offset;
+	struct sk_buff		*next_frag;
+	int			frag_run_len;
 };
 
-#define FRAG_CB(skb)	((struct ipfrag_skb_cb *)((skb)->cb))
+#define FRAG_CB(skb)		((struct ipfrag_skb_cb *)((skb)->cb))
+
+static void ip4_frag_init_run(struct sk_buff *skb)
+{
+	BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb));
+
+	FRAG_CB(skb)->next_frag = NULL;
+	FRAG_CB(skb)->frag_run_len = skb->len;
+}
+
+/* Append skb to the last "run". */
+static void ip4_frag_append_to_last_run(struct inet_frag_queue *q,
+					struct sk_buff *skb)
+{
+	RB_CLEAR_NODE(&skb->rbnode);
+	FRAG_CB(skb)->next_frag = NULL;
+
+	FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
+	FRAG_CB(q->fragments_tail)->next_frag = skb;
+	q->fragments_tail = skb;
+}
+
+/* Create a new "run" with the skb. */
+static void ip4_frag_create_run(struct inet_frag_queue *q, struct sk_buff *skb)
+{
+	if (q->last_run_head)
+		rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
+			     &q->last_run_head->rbnode.rb_right);
+	else
+		rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
+	rb_insert_color(&skb->rbnode, &q->rb_fragments);
+
+	ip4_frag_init_run(skb);
+	q->fragments_tail = skb;
+	q->last_run_head = skb;
+}
 
 /* Describe an entry in the "incomplete datagrams" queue. */
 struct ipq {
 	struct inet_frag_queue q;
 
-	u32		user;
-	__be32		saddr;
-	__be32		daddr;
-	__be16		id;
-	u8		protocol;
 	u8		ecn; /* RFC3168 support */
 	u16		max_df_size; /* largest frag with DF set seen */
 	int             iif;
-	int             vif;   /* L3 master device index */
 	unsigned int    rid;
 	struct inet_peer *peer;
 };
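The cb-based bookkeeping introduced above is what lets the queue keep one rbtree node per "run" of adjacent fragments instead of one node per fragment: a fragment that lands exactly at the current tail is chained via next_frag, anything else starts a new tree node. A standalone model of that grouping rule:

#include <stdio.h>

/* Standalone model: group (offset, len) fragments arriving in order
 * into "runs" of adjacent fragments, as the rbtree queue does.
 */
struct frag { int offset, len; };

int main(void)
{
	struct frag frags[] = { {0, 8}, {8, 8}, {16, 8}, {32, 8}, {40, 8} };
	int runs = 0, tail_end = -1;

	for (unsigned i = 0; i < sizeof(frags) / sizeof(frags[0]); i++) {
		if (frags[i].offset != tail_end) {
			runs++;			/* new rbtree node */
			printf("run %d starts at offset %d\n",
			       runs, frags[i].offset);
		}				/* else: appended to the last run */
		tail_end = frags[i].offset + frags[i].len;
	}
	printf("%d fragments -> %d tree nodes\n",
	       (int)(sizeof(frags) / sizeof(frags[0])), runs);
	return 0;
}
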
@@ -88,49 +125,9 @@
 
 static struct inet_frags ip4_frags;
 
-int ip_frag_mem(struct net *net)
-{
-	return sum_frag_mem_limit(&net->ipv4.frags);
-}
+static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
+			 struct sk_buff *prev_tail, struct net_device *dev);
 
-static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
-			 struct net_device *dev);
-
-struct ip4_create_arg {
-	struct iphdr *iph;
-	u32 user;
-	int vif;
-};
-
-static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot)
-{
-	net_get_random_once(&ip4_frags.rnd, sizeof(ip4_frags.rnd));
-	return jhash_3words((__force u32)id << 16 | prot,
-			    (__force u32)saddr, (__force u32)daddr,
-			    ip4_frags.rnd);
-}
-
-static unsigned int ip4_hashfn(const struct inet_frag_queue *q)
-{
-	const struct ipq *ipq;
-
-	ipq = container_of(q, struct ipq, q);
-	return ipqhashfn(ipq->id, ipq->saddr, ipq->daddr, ipq->protocol);
-}
-
-static bool ip4_frag_match(const struct inet_frag_queue *q, const void *a)
-{
-	const struct ipq *qp;
-	const struct ip4_create_arg *arg = a;
-
-	qp = container_of(q, struct ipq, q);
-	return	qp->id == arg->iph->id &&
-		qp->saddr == arg->iph->saddr &&
-		qp->daddr == arg->iph->daddr &&
-		qp->protocol == arg->iph->protocol &&
-		qp->user == arg->user &&
-		qp->vif == arg->vif;
-}
 
 static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
 {
@@ -139,17 +136,12 @@
 					       frags);
 	struct net *net = container_of(ipv4, struct net, ipv4);
 
-	const struct ip4_create_arg *arg = a;
+	const struct frag_v4_compare_key *key = a;
 
-	qp->protocol = arg->iph->protocol;
-	qp->id = arg->iph->id;
-	qp->ecn = ip4_frag_ecn(arg->iph->tos);
-	qp->saddr = arg->iph->saddr;
-	qp->daddr = arg->iph->daddr;
-	qp->vif = arg->vif;
-	qp->user = arg->user;
+	q->key.v4 = *key;
+	qp->ecn = 0;
 	qp->peer = q->net->max_dist ?
-		inet_getpeer_v4(net->ipv4.peers, arg->iph->saddr, arg->vif, 1) :
+		inet_getpeer_v4(net->ipv4.peers, key->saddr, key->vif, 1) :
 		NULL;
 }
 
@@ -167,7 +159,7 @@
 
 static void ipq_put(struct ipq *ipq)
 {
-	inet_frag_put(&ipq->q, &ip4_frags);
+	inet_frag_put(&ipq->q);
 }
 
 /* Kill ipq entry. It is not destroyed immediately,
@@ -175,7 +167,7 @@
  */
 static void ipq_kill(struct ipq *ipq)
 {
-	inet_frag_kill(&ipq->q, &ip4_frags);
+	inet_frag_kill(&ipq->q);
 }
 
 static bool frag_expire_skip_icmp(u32 user)
@@ -192,8 +184,11 @@
  */
 static void ip_expire(unsigned long arg)
 {
-	struct ipq *qp;
+	const struct iphdr *iph;
+	struct sk_buff *head = NULL;
 	struct net *net;
+	struct ipq *qp;
+	int err;
 
 	qp = container_of((struct inet_frag_queue *) arg, struct ipq, q);
 	net = container_of(qp->q.net, struct net, ipv4.frags);
@@ -206,51 +201,65 @@
 
 	ipq_kill(qp);
 	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
+	__IP_INC_STATS(net, IPSTATS_MIB_REASMTIMEOUT);
 
-	if (!inet_frag_evicting(&qp->q)) {
-		struct sk_buff *clone, *head = qp->q.fragments;
-		const struct iphdr *iph;
-		int err;
+	if (!(qp->q.flags & INET_FRAG_FIRST_IN))
+		goto out;
 
-		__IP_INC_STATS(net, IPSTATS_MIB_REASMTIMEOUT);
-
-		if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !qp->q.fragments)
+	/* sk_buff::dev and sk_buff::rbnode share a union. So we
+	 * pull the head out of the tree in order to be able to
+	 * deal with head->dev.
+	 */
+	if (qp->q.fragments) {
+		head = qp->q.fragments;
+		qp->q.fragments = head->next;
+	} else {
+		head = skb_rb_first(&qp->q.rb_fragments);
+		if (!head)
 			goto out;
-
-		head->dev = dev_get_by_index_rcu(net, qp->iif);
-		if (!head->dev)
-			goto out;
-
-
-		/* skb has no dst, perform route lookup again */
-		iph = ip_hdr(head);
-		err = ip_route_input_noref(head, iph->daddr, iph->saddr,
-					   iph->tos, head->dev);
-		if (err)
-			goto out;
-
-		/* Only an end host needs to send an ICMP
-		 * "Fragment Reassembly Timeout" message, per RFC792.
-		 */
-		if (frag_expire_skip_icmp(qp->user) &&
-		    (skb_rtable(head)->rt_type != RTN_LOCAL))
-			goto out;
-
-		clone = skb_clone(head, GFP_ATOMIC);
-
-		/* Send an ICMP "Fragment Reassembly Timeout" message. */
-		if (clone) {
-			spin_unlock(&qp->q.lock);
-			icmp_send(clone, ICMP_TIME_EXCEEDED,
-				  ICMP_EXC_FRAGTIME, 0);
-			consume_skb(clone);
-			goto out_rcu_unlock;
-		}
+		if (FRAG_CB(head)->next_frag)
+			rb_replace_node(&head->rbnode,
+					&FRAG_CB(head)->next_frag->rbnode,
+					&qp->q.rb_fragments);
+		else
+			rb_erase(&head->rbnode, &qp->q.rb_fragments);
+		memset(&head->rbnode, 0, sizeof(head->rbnode));
+		barrier();
 	}
+	if (head == qp->q.fragments_tail)
+		qp->q.fragments_tail = NULL;
+
+	sub_frag_mem_limit(qp->q.net, head->truesize);
+
+	head->dev = dev_get_by_index_rcu(net, qp->iif);
+	if (!head->dev)
+		goto out;
+
+
+	/* skb has no dst, perform route lookup again */
+	iph = ip_hdr(head);
+	err = ip_route_input_noref(head, iph->daddr, iph->saddr,
+					   iph->tos, head->dev);
+	if (err)
+		goto out;
+
+	/* Only an end host needs to send an ICMP
+	 * "Fragment Reassembly Timeout" message, per RFC792.
+	 */
+	if (frag_expire_skip_icmp(qp->q.key.v4.user) &&
+	    (skb_rtable(head)->rt_type != RTN_LOCAL))
+		goto out;
+
+	spin_unlock(&qp->q.lock);
+	icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
+	goto out_rcu_unlock;
+
 out:
 	spin_unlock(&qp->q.lock);
 out_rcu_unlock:
 	rcu_read_unlock();
+	if (head)
+		kfree_skb(head);
 	ipq_put(qp);
 }
 
@@ -260,21 +269,20 @@
 static struct ipq *ip_find(struct net *net, struct iphdr *iph,
 			   u32 user, int vif)
 {
+	struct frag_v4_compare_key key = {
+		.saddr = iph->saddr,
+		.daddr = iph->daddr,
+		.user = user,
+		.vif = vif,
+		.id = iph->id,
+		.protocol = iph->protocol,
+	};
 	struct inet_frag_queue *q;
-	struct ip4_create_arg arg;
-	unsigned int hash;
 
-	arg.iph = iph;
-	arg.user = user;
-	arg.vif = vif;
-
-	hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);
-
-	q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
-	if (IS_ERR_OR_NULL(q)) {
-		inet_frag_maybe_warn_overflow(q, pr_fmt());
+	q = inet_frag_find(&net->ipv4.frags, &key);
+	if (!q)
 		return NULL;
-	}
+
 	return container_of(q, struct ipq, q);
 }
 
@@ -294,7 +302,7 @@
 	end = atomic_inc_return(&peer->rid);
 	qp->rid = end;
 
-	rc = qp->q.fragments && (end - start) > max;
+	rc = qp->q.fragments_tail && (end - start) > max;
 
 	if (rc) {
 		struct net *net;
@@ -308,7 +316,6 @@
 
 static int ip_frag_reinit(struct ipq *qp)
 {
-	struct sk_buff *fp;
 	unsigned int sum_truesize = 0;
 
 	if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) {
@@ -316,21 +323,16 @@
 		return -ETIMEDOUT;
 	}
 
-	fp = qp->q.fragments;
-	do {
-		struct sk_buff *xp = fp->next;
-
-		sum_truesize += fp->truesize;
-		kfree_skb(fp);
-		fp = xp;
-	} while (fp);
+	sum_truesize = inet_frag_rbtree_purge(&qp->q.rb_fragments);
 	sub_frag_mem_limit(qp->q.net, sum_truesize);
 
 	qp->q.flags = 0;
 	qp->q.len = 0;
 	qp->q.meat = 0;
 	qp->q.fragments = NULL;
+	qp->q.rb_fragments = RB_ROOT;
 	qp->q.fragments_tail = NULL;
+	qp->q.last_run_head = NULL;
 	qp->iif = 0;
 	qp->ecn = 0;
 
@@ -340,7 +342,9 @@
 /* Add new segment to existing queue. */
 static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 {
-	struct sk_buff *prev, *next;
+	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
+	struct rb_node **rbn, *parent;
+	struct sk_buff *skb1, *prev_tail;
 	struct net_device *dev;
 	unsigned int fragsize;
 	int flags, offset;
@@ -403,99 +407,61 @@
 	if (err)
 		goto err;
 
-	/* Find out which fragments are in front and at the back of us
-	 * in the chain of fragments so far.  We must know where to put
-	 * this fragment, right?
-	 */
-	prev = qp->q.fragments_tail;
-	if (!prev || FRAG_CB(prev)->offset < offset) {
-		next = NULL;
-		goto found;
-	}
-	prev = NULL;
-	for (next = qp->q.fragments; next != NULL; next = next->next) {
-		if (FRAG_CB(next)->offset >= offset)
-			break;	/* bingo! */
-		prev = next;
-	}
-
-found:
-	/* We found where to put this one.  Check for overlap with
-	 * preceding fragment, and, if needed, align things so that
-	 * any overlaps are eliminated.
-	 */
-	if (prev) {
-		int i = (FRAG_CB(prev)->offset + prev->len) - offset;
-
-		if (i > 0) {
-			offset += i;
-			err = -EINVAL;
-			if (end <= offset)
-				goto err;
-			err = -ENOMEM;
-			if (!pskb_pull(skb, i))
-				goto err;
-			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
-				skb->ip_summed = CHECKSUM_NONE;
-		}
-	}
-
-	err = -ENOMEM;
-
-	while (next && FRAG_CB(next)->offset < end) {
-		int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */
-
-		if (i < next->len) {
-			int delta = -next->truesize;
-
-			/* Eat head of the next overlapped fragment
-			 * and leave the loop. The next ones cannot overlap.
-			 */
-			if (!pskb_pull(next, i))
-				goto err;
-			delta += next->truesize;
-			if (delta)
-				add_frag_mem_limit(qp->q.net, delta);
-			FRAG_CB(next)->offset += i;
-			qp->q.meat -= i;
-			if (next->ip_summed != CHECKSUM_UNNECESSARY)
-				next->ip_summed = CHECKSUM_NONE;
-			break;
-		} else {
-			struct sk_buff *free_it = next;
-
-			/* Old fragment is completely overridden with
-			 * new one drop it.
-			 */
-			next = next->next;
-
-			if (prev)
-				prev->next = next;
-			else
-				qp->q.fragments = next;
-
-			qp->q.meat -= free_it->len;
-			sub_frag_mem_limit(qp->q.net, free_it->truesize);
-			kfree_skb(free_it);
-		}
-	}
-
-	FRAG_CB(skb)->offset = offset;
-
-	/* Insert this fragment in the chain of fragments. */
-	skb->next = next;
-	if (!next)
-		qp->q.fragments_tail = skb;
-	if (prev)
-		prev->next = skb;
-	else
-		qp->q.fragments = skb;
-
+	/* Note : skb->rbnode and skb->dev share the same location. */
 	dev = skb->dev;
-	if (dev) {
-		qp->iif = dev->ifindex;
-		skb->dev = NULL;
+	/* Make sure the compiler won't do silly aliasing games */
+	barrier();
+
+	/* RFC5722, Section 4, amended by Errata ID : 3089
+	 *                          When reassembling an IPv6 datagram, if
+	 *   one or more of its constituent fragments is determined to be an
+	 *   overlapping fragment, the entire datagram (and any constituent
+	 *   fragments) MUST be silently discarded.
+	 *
+	 * We do the same here for IPv4 (and increment an snmp counter).
+	 */
+
+	/* Find out where to put this fragment.  */
+	prev_tail = qp->q.fragments_tail;
+	if (!prev_tail)
+		ip4_frag_create_run(&qp->q, skb);  /* First fragment. */
+	else if (prev_tail->ip_defrag_offset + prev_tail->len < end) {
+		/* This is the common case: skb goes to the end. */
+		/* Detect and discard overlaps. */
+		if (offset < prev_tail->ip_defrag_offset + prev_tail->len)
+			goto discard_qp;
+		if (offset == prev_tail->ip_defrag_offset + prev_tail->len)
+			ip4_frag_append_to_last_run(&qp->q, skb);
+		else
+			ip4_frag_create_run(&qp->q, skb);
+	} else {
+		/* Binary search. Note that skb can become the first fragment,
+		 * but not the last (covered above).
+		 */
+		rbn = &qp->q.rb_fragments.rb_node;
+		do {
+			parent = *rbn;
+			skb1 = rb_to_skb(parent);
+			if (end <= skb1->ip_defrag_offset)
+				rbn = &parent->rb_left;
+			else if (offset >= skb1->ip_defrag_offset +
+						FRAG_CB(skb1)->frag_run_len)
+				rbn = &parent->rb_right;
+			else /* Found an overlap with skb1. */
+				goto discard_qp;
+		} while (*rbn);
+		/* Here we have parent properly set, and rbn pointing to
+		 * one of its NULL left/right children. Insert skb.
+		 */
+		ip4_frag_init_run(skb);
+		rb_link_node(&skb->rbnode, parent, rbn);
+		rb_insert_color(&skb->rbnode, &qp->q.rb_fragments);
 	}
+
+	if (dev)
+		qp->iif = dev->ifindex;
+	skb->ip_defrag_offset = offset;
+
 	qp->q.stamp = skb->tstamp;
 	qp->q.meat += skb->len;
 	qp->ecn |= ecn;
@@ -517,7 +483,7 @@
 		unsigned long orefdst = skb->_skb_refdst;
 
 		skb->_skb_refdst = 0UL;
-		err = ip_frag_reasm(qp, prev, dev);
+		err = ip_frag_reasm(qp, skb, prev_tail, dev);
 		skb->_skb_refdst = orefdst;
 		return err;
 	}
@@ -525,20 +491,24 @@
 	skb_dst_drop(skb);
 	return -EINPROGRESS;
 
+discard_qp:
+	inet_frag_kill(&qp->q);
+	err = -EINVAL;
+	__IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
 err:
 	kfree_skb(skb);
 	return err;
 }
 
-
 /* Build a new IP datagram from all its fragments. */
-
-static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
-			 struct net_device *dev)
+static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
+			 struct sk_buff *prev_tail, struct net_device *dev)
 {
 	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
 	struct iphdr *iph;
-	struct sk_buff *fp, *head = qp->q.fragments;
+	struct sk_buff *fp, *head = skb_rb_first(&qp->q.rb_fragments);
+	struct sk_buff **nextp; /* To build frag_list. */
+	struct rb_node *rbn;
 	int len;
 	int ihlen;
 	int err;
@@ -552,26 +522,27 @@
 		goto out_fail;
 	}
 	/* Make the one we just received the head. */
-	if (prev) {
-		head = prev->next;
-		fp = skb_clone(head, GFP_ATOMIC);
+	if (head != skb) {
+		fp = skb_clone(skb, GFP_ATOMIC);
 		if (!fp)
 			goto out_nomem;
-
-		fp->next = head->next;
-		if (!fp->next)
+		FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
+		if (RB_EMPTY_NODE(&skb->rbnode))
+			FRAG_CB(prev_tail)->next_frag = fp;
+		else
+			rb_replace_node(&skb->rbnode, &fp->rbnode,
+					&qp->q.rb_fragments);
+		if (qp->q.fragments_tail == skb)
 			qp->q.fragments_tail = fp;
-		prev->next = fp;
-
-		skb_morph(head, qp->q.fragments);
-		head->next = qp->q.fragments->next;
-
-		consume_skb(qp->q.fragments);
-		qp->q.fragments = head;
+		skb_morph(skb, head);
+		FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
+		rb_replace_node(&head->rbnode, &skb->rbnode,
+				&qp->q.rb_fragments);
+		consume_skb(head);
+		head = skb;
 	}
 
-	WARN_ON(!head);
-	WARN_ON(FRAG_CB(head)->offset != 0);
+	WARN_ON(head->ip_defrag_offset != 0);
 
 	/* Allocate a new buffer for the datagram. */
 	ihlen = ip_hdrlen(head);
@@ -595,35 +566,61 @@
 		clone = alloc_skb(0, GFP_ATOMIC);
 		if (!clone)
 			goto out_nomem;
-		clone->next = head->next;
-		head->next = clone;
 		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
 		skb_frag_list_init(head);
 		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
 			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
 		clone->len = clone->data_len = head->data_len - plen;
-		head->data_len -= clone->len;
-		head->len -= clone->len;
+		head->truesize += clone->truesize;
 		clone->csum = 0;
 		clone->ip_summed = head->ip_summed;
 		add_frag_mem_limit(qp->q.net, clone->truesize);
+		skb_shinfo(head)->frag_list = clone;
+		nextp = &clone->next;
+	} else {
+		nextp = &skb_shinfo(head)->frag_list;
 	}
 
-	skb_shinfo(head)->frag_list = head->next;
 	skb_push(head, head->data - skb_network_header(head));
 
-	for (fp=head->next; fp; fp = fp->next) {
-		head->data_len += fp->len;
-		head->len += fp->len;
-		if (head->ip_summed != fp->ip_summed)
-			head->ip_summed = CHECKSUM_NONE;
-		else if (head->ip_summed == CHECKSUM_COMPLETE)
-			head->csum = csum_add(head->csum, fp->csum);
-		head->truesize += fp->truesize;
+	/* Traverse the tree in order, to build frag_list. */
+	fp = FRAG_CB(head)->next_frag;
+	rbn = rb_next(&head->rbnode);
+	rb_erase(&head->rbnode, &qp->q.rb_fragments);
+	while (rbn || fp) {
+		/* fp points to the next sk_buff in the current run;
+		 * rbn points to the next run.
+		 */
+		/* Go through the current run. */
+		while (fp) {
+			*nextp = fp;
+			nextp = &fp->next;
+			fp->prev = NULL;
+			memset(&fp->rbnode, 0, sizeof(fp->rbnode));
+			fp->sk = NULL;
+			head->data_len += fp->len;
+			head->len += fp->len;
+			if (head->ip_summed != fp->ip_summed)
+				head->ip_summed = CHECKSUM_NONE;
+			else if (head->ip_summed == CHECKSUM_COMPLETE)
+				head->csum = csum_add(head->csum, fp->csum);
+			head->truesize += fp->truesize;
+			fp = FRAG_CB(fp)->next_frag;
+		}
+		/* Move to the next run. */
+		if (rbn) {
+			struct rb_node *rbnext = rb_next(rbn);
+
+			fp = rb_to_skb(rbn);
+			rb_erase(rbn, &qp->q.rb_fragments);
+			rbn = rbnext;
+		}
 	}
 	sub_frag_mem_limit(qp->q.net, head->truesize);
 
+	*nextp = NULL;
 	head->next = NULL;
+	head->prev = NULL;
 	head->dev = dev;
 	head->tstamp = qp->q.stamp;
 	IPCB(head)->frag_max_size = max(qp->max_df_size, qp->q.max_size);
@@ -651,7 +648,9 @@
 
 	__IP_INC_STATS(net, IPSTATS_MIB_REASMOKS);
 	qp->q.fragments = NULL;
+	qp->q.rb_fragments = RB_ROOT;
 	qp->q.fragments_tail = NULL;
+	qp->q.last_run_head = NULL;
 	return 0;
 
 out_nomem:
@@ -659,7 +658,7 @@
 	err = -ENOMEM;
 	goto out_fail;
 out_oversize:
-	net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->saddr);
+	net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->q.key.v4.saddr);
 out_fail:
 	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
 	return err;
@@ -733,25 +732,46 @@
 }
 EXPORT_SYMBOL(ip_check_defrag);
 
+unsigned int inet_frag_rbtree_purge(struct rb_root *root)
+{
+	struct rb_node *p = rb_first(root);
+	unsigned int sum = 0;
+
+	while (p) {
+		struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
+
+		p = rb_next(p);
+		rb_erase(&skb->rbnode, root);
+		while (skb) {
+			struct sk_buff *next = FRAG_CB(skb)->next_frag;
+
+			sum += skb->truesize;
+			kfree_skb(skb);
+			skb = next;
+		}
+	}
+	return sum;
+}
+EXPORT_SYMBOL(inet_frag_rbtree_purge);
+
 #ifdef CONFIG_SYSCTL
-static int zero;
+static int dist_min;
 
 static struct ctl_table ip4_frags_ns_ctl_table[] = {
 	{
 		.procname	= "ipfrag_high_thresh",
 		.data		= &init_net.ipv4.frags.high_thresh,
-		.maxlen		= sizeof(int),
+		.maxlen		= sizeof(unsigned long),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec_minmax,
+		.proc_handler	= proc_doulongvec_minmax,
 		.extra1		= &init_net.ipv4.frags.low_thresh
 	},
 	{
 		.procname	= "ipfrag_low_thresh",
 		.data		= &init_net.ipv4.frags.low_thresh,
-		.maxlen		= sizeof(int),
+		.maxlen		= sizeof(unsigned long),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &zero,
+		.proc_handler	= proc_doulongvec_minmax,
 		.extra2		= &init_net.ipv4.frags.high_thresh
 	},
 	{
@@ -767,7 +787,7 @@
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &zero
+		.extra1		= &dist_min,
 	},
 	{ }
 };
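The threshold entries above (and the matching 6LoWPAN and nf_conntrack tables elsewhere in this patch) switch to proc_doulongvec_minmax because high_thresh/low_thresh are now unsigned long: the proc handler transfers exactly .maxlen bytes behind .data, so the data type, .maxlen and the handler must change together. A minimal sketch of a consistent entry, using a hypothetical example_high_thresh variable:

/* Sketch: the three fields must stay in sync, or the handler will
 * read/write the wrong number of bytes behind .data.
 */
static unsigned long example_high_thresh = 4 * 1024 * 1024;

static struct ctl_table frag_thresh_example[] = {
	{
		.procname	= "example_high_thresh",
		.data		= &example_high_thresh,	/* unsigned long */
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
	},
	{ }
};
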
@@ -849,6 +869,8 @@
 
 static int __net_init ipv4_frags_init_net(struct net *net)
 {
+	int res;
+
 	/* Fragment cache limits.
 	 *
 	 * The fragment memory accounting code, (tries to) account for
@@ -873,16 +895,21 @@
 	net->ipv4.frags.timeout = IP_FRAG_TIME;
 
 	net->ipv4.frags.max_dist = 64;
+	net->ipv4.frags.f = &ip4_frags;
 
-	inet_frags_init_net(&net->ipv4.frags);
-
-	return ip4_frags_ns_ctl_register(net);
+	res = inet_frags_init_net(&net->ipv4.frags);
+	if (res < 0)
+		return res;
+	res = ip4_frags_ns_ctl_register(net);
+	if (res < 0)
+		inet_frags_exit_net(&net->ipv4.frags);
+	return res;
 }
 
 static void __net_exit ipv4_frags_exit_net(struct net *net)
 {
 	ip4_frags_ns_ctl_unregister(net);
-	inet_frags_exit_net(&net->ipv4.frags, &ip4_frags);
+	inet_frags_exit_net(&net->ipv4.frags);
 }
 
 static struct pernet_operations ip4_frags_ops = {
@@ -890,17 +917,49 @@
 	.exit = ipv4_frags_exit_net,
 };
 
+
+static u32 ip4_key_hashfn(const void *data, u32 len, u32 seed)
+{
+	return jhash2(data,
+		      sizeof(struct frag_v4_compare_key) / sizeof(u32), seed);
+}
+
+static u32 ip4_obj_hashfn(const void *data, u32 len, u32 seed)
+{
+	const struct inet_frag_queue *fq = data;
+
+	return jhash2((const u32 *)&fq->key.v4,
+		      sizeof(struct frag_v4_compare_key) / sizeof(u32), seed);
+}
+
+static int ip4_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
+{
+	const struct frag_v4_compare_key *key = arg->key;
+	const struct inet_frag_queue *fq = ptr;
+
+	return !!memcmp(&fq->key, key, sizeof(*key));
+}
+
+static const struct rhashtable_params ip4_rhash_params = {
+	.head_offset		= offsetof(struct inet_frag_queue, node),
+	.key_offset		= offsetof(struct inet_frag_queue, key),
+	.key_len		= sizeof(struct frag_v4_compare_key),
+	.hashfn			= ip4_key_hashfn,
+	.obj_hashfn		= ip4_obj_hashfn,
+	.obj_cmpfn		= ip4_obj_cmpfn,
+	.automatic_shrinking	= true,
+};
+
 void __init ipfrag_init(void)
 {
-	ip4_frags_ctl_register();
-	register_pernet_subsys(&ip4_frags_ops);
-	ip4_frags.hashfn = ip4_hashfn;
 	ip4_frags.constructor = ip4_frag_init;
 	ip4_frags.destructor = ip4_frag_free;
 	ip4_frags.qsize = sizeof(struct ipq);
-	ip4_frags.match = ip4_frag_match;
 	ip4_frags.frag_expire = ip_expire;
 	ip4_frags.frags_cache_name = ip_frag_cache_name;
+	ip4_frags.rhash_params = ip4_rhash_params;
 	if (inet_frags_init(&ip4_frags))
 		panic("IP: failed to allocate ip4_frags cache\n");
+	ip4_frags_ctl_register();
+	register_pernet_subsys(&ip4_frags_ops);
 }
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index b21e435..a5851c0 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -134,7 +134,6 @@
 static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
 {
 	struct sockaddr_in sin;
-	const struct iphdr *iph = ip_hdr(skb);
 	__be16 *ports;
 	int end;
 
@@ -149,7 +148,7 @@
 	ports = (__be16 *)skb_transport_header(skb);
 
 	sin.sin_family = AF_INET;
-	sin.sin_addr.s_addr = iph->daddr;
+	sin.sin_addr.s_addr = ip_hdr(skb)->daddr;
 	sin.sin_port = ports[1];
 	memset(sin.sin_zero, 0, sizeof(sin.sin_zero));
 
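Dropping the cached iph local here is a use-after-free fix: pskb_may_pull() done elsewhere in this function can reallocate skb->head, which leaves any previously computed header pointer dangling. The ip_tunnel.c and ip6_tunnel.c hunks below follow the same rule by pulling the inner header before the first ip_hdr()/ipv6_hdr() read. An illustrative sketch of the pattern, not the exact call site:

	/* Buggy shape: a header pointer cached before a pull may dangle. */
	const struct iphdr *iph = ip_hdr(skb);		/* cached too early */

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + 4))
		return;
	sin.sin_addr.s_addr = iph->daddr;		/* potential use-after-free */

	/* Fix: derive the pointer at the point of use, after any pull. */
	sin.sin_addr.s_addr = ip_hdr(skb)->daddr;
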
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index e1271e75..d8d99c2 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -627,6 +627,7 @@
 		    const struct iphdr *tnl_params, u8 protocol)
 {
 	struct ip_tunnel *tunnel = netdev_priv(dev);
+	unsigned int inner_nhdr_len = 0;
 	const struct iphdr *inner_iph;
 	struct flowi4 fl4;
 	u8     tos, ttl;
@@ -636,6 +637,14 @@
 	__be32 dst;
 	bool connected;
 
+	/* ensure we can access the inner net header, for several users below */
+	if (skb->protocol == htons(ETH_P_IP))
+		inner_nhdr_len = sizeof(struct iphdr);
+	else if (skb->protocol == htons(ETH_P_IPV6))
+		inner_nhdr_len = sizeof(struct ipv6hdr);
+	if (unlikely(!pskb_may_pull(skb, inner_nhdr_len)))
+		goto tx_error;
+
 	inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
 	connected = (tunnel->parms.iph.daddr != 0);
 
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 7143ca1..ec48d8ea 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -54,7 +54,6 @@
 static int sockstat_seq_show(struct seq_file *seq, void *v)
 {
 	struct net *net = seq->private;
-	unsigned int frag_mem;
 	int orphans, sockets;
 
 	local_bh_disable();
@@ -74,8 +73,9 @@
 		   sock_prot_inuse_get(net, &udplite_prot));
 	seq_printf(seq, "RAW: inuse %d\n",
 		   sock_prot_inuse_get(net, &raw_prot));
-	frag_mem = ip_frag_mem(net);
-	seq_printf(seq,  "FRAG: inuse %u memory %u\n", !!frag_mem, frag_mem);
+	seq_printf(seq,  "FRAG: inuse %u memory %lu\n",
+		   atomic_read(&net->ipv4.frags.rhashtable.nelems),
+		   frag_mem_limit(&net->ipv4.frags));
 	return 0;
 }
 
@@ -134,6 +134,7 @@
 	SNMP_MIB_ITEM("InECT1Pkts", IPSTATS_MIB_ECT1PKTS),
 	SNMP_MIB_ITEM("InECT0Pkts", IPSTATS_MIB_ECT0PKTS),
 	SNMP_MIB_ITEM("InCEPkts", IPSTATS_MIB_CEPKTS),
+	SNMP_MIB_ITEM("ReasmOverlaps", IPSTATS_MIB_REASM_OVERLAPS),
 	SNMP_MIB_SENTINEL
 };
 
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 8fc8c8d..5f6dc5f 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4407,7 +4407,7 @@
 
 	p = rb_first(&tp->out_of_order_queue);
 	while (p) {
-		skb = rb_entry(p, struct sk_buff, rbnode);
+		skb = rb_to_skb(p);
 		if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
 			break;
 
@@ -4471,7 +4471,7 @@
 static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	struct rb_node **p, *q, *parent;
+	struct rb_node **p, *parent;
 	struct sk_buff *skb1;
 	u32 seq, end_seq;
 	bool fragstolen;
@@ -4530,7 +4530,7 @@
 	parent = NULL;
 	while (*p) {
 		parent = *p;
-		skb1 = rb_entry(parent, struct sk_buff, rbnode);
+		skb1 = rb_to_skb(parent);
 		if (before(seq, TCP_SKB_CB(skb1)->seq)) {
 			p = &parent->rb_left;
 			continue;
@@ -4575,9 +4575,7 @@
 
 merge_right:
 	/* Remove other segments covered by skb. */
-	while ((q = rb_next(&skb->rbnode)) != NULL) {
-		skb1 = rb_entry(q, struct sk_buff, rbnode);
-
+	while ((skb1 = skb_rb_next(skb)) != NULL) {
 		if (!after(end_seq, TCP_SKB_CB(skb1)->seq))
 			break;
 		if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
@@ -4592,7 +4590,7 @@
 		tcp_drop(sk, skb1);
 	}
 	/* If there is no skb after us, we are the last_skb ! */
-	if (!q)
+	if (!skb1)
 		tp->ooo_last_skb = skb;
 
 add_sack:
@@ -4793,7 +4791,7 @@
 	if (list)
 		return !skb_queue_is_last(list, skb) ? skb->next : NULL;
 
-	return rb_entry_safe(rb_next(&skb->rbnode), struct sk_buff, rbnode);
+	return skb_rb_next(skb);
 }
 
 static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
@@ -4822,7 +4820,7 @@
 
 	while (*p) {
 		parent = *p;
-		skb1 = rb_entry(parent, struct sk_buff, rbnode);
+		skb1 = rb_to_skb(parent);
 		if (before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb1)->seq))
 			p = &parent->rb_left;
 		else
@@ -4942,19 +4940,12 @@
 	struct tcp_sock *tp = tcp_sk(sk);
 	u32 range_truesize, sum_tiny = 0;
 	struct sk_buff *skb, *head;
-	struct rb_node *p;
 	u32 start, end;
 
-	p = rb_first(&tp->out_of_order_queue);
-	skb = rb_entry_safe(p, struct sk_buff, rbnode);
+	skb = skb_rb_first(&tp->out_of_order_queue);
 new_range:
 	if (!skb) {
-		p = rb_last(&tp->out_of_order_queue);
-		/* Note: This is possible p is NULL here. We do not
-		 * use rb_entry_safe(), as ooo_last_skb is valid only
-		 * if rbtree is not empty.
-		 */
-		tp->ooo_last_skb = rb_entry(p, struct sk_buff, rbnode);
+		tp->ooo_last_skb = skb_rb_last(&tp->out_of_order_queue);
 		return;
 	}
 	start = TCP_SKB_CB(skb)->seq;
@@ -4962,7 +4953,7 @@
 	range_truesize = skb->truesize;
 
 	for (head = skb;;) {
-		skb = tcp_skb_next(skb, NULL);
+		skb = skb_rb_next(skb);
 
 		/* Range is terminated when we see a gap or when
 		 * we are at the queue end.
@@ -5018,7 +5009,7 @@
 		prev = rb_prev(node);
 		rb_erase(node, &tp->out_of_order_queue);
 		goal -= rb_to_skb(node)->truesize;
-		tcp_drop(sk, rb_entry(node, struct sk_buff, rbnode));
+		tcp_drop(sk, rb_to_skb(node));
 		if (!prev || goal <= 0) {
 			sk_mem_reclaim(sk);
 			if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
@@ -5028,7 +5019,7 @@
 		}
 		node = prev;
 	} while (node);
-	tp->ooo_last_skb = rb_entry(prev, struct sk_buff, rbnode);
+	tp->ooo_last_skb = rb_to_skb(prev);
 
 	/* Reset SACK state.  A conforming SACK implementation will
 	 * do the same at a timeout based retransmit.  When a connection
@@ -5980,11 +5971,13 @@
 			if (th->fin)
 				goto discard;
 			/* It is possible that we process SYN packets from backlog,
-			 * so we need to make sure to disable BH right there.
+			 * so we need to make sure to disable BH and RCU right there.
 			 */
+			rcu_read_lock();
 			local_bh_disable();
 			acceptable = icsk->icsk_af_ops->conn_request(sk, skb) >= 0;
 			local_bh_enable();
+			rcu_read_unlock();
 
 			if (!acceptable)
 				return 1;
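The conversions in this file lean on skb rbtree helpers (rb_to_skb(), skb_rb_first(), skb_rb_next(), ...) that are presumably introduced elsewhere in the series as thin wrappers over rb_entry_safe() and sk_buff::rbnode, roughly along these lines:

/* Assumed shape of the helpers used above; the authoritative
 * definitions live in include/linux/skbuff.h.
 */
#define rb_to_skb(rb)       rb_entry_safe(rb, struct sk_buff, rbnode)
#define skb_rb_first(root)  rb_to_skb(rb_first(root))
#define skb_rb_last(root)   rb_to_skb(rb_last(root))
#define skb_rb_next(skb)    rb_to_skb(rb_next(&(skb)->rbnode))
#define skb_rb_prev(skb)    rb_to_skb(rb_prev(&(skb)->rbnode))
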
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 07c8fd2..9bfa876 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -867,9 +867,11 @@
 	if (skb) {
 		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
 
+		rcu_read_lock();
 		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
 					    ireq->ir_rmt_addr,
-					    ireq_opt_deref(ireq));
+					    rcu_dereference(ireq->ireq_opt));
+		rcu_read_unlock();
 		err = net_xmit_eval(err);
 	}
 
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 885cc39..789bbcb 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1740,6 +1740,28 @@
 							 inet_compute_pseudo);
 }
 
+/* wrapper for udp_queue_rcv_skb taking care of csum conversion and
+ * return code conversion for ip layer consumption
+ */
+static int udp_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
+			       struct udphdr *uh)
+{
+	int ret;
+
+	if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
+		skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
+					 inet_compute_pseudo);
+
+	ret = udp_queue_rcv_skb(sk, skb);
+
+	/* a return value > 0 means to resubmit the input, but
+	 * it wants the return to be -protocol, or 0
+	 */
+	if (ret > 0)
+		return -ret;
+	return 0;
+}
+
 /*
  *	All we need to do is get the socket, and then do a checksum.
  */
@@ -1786,14 +1808,9 @@
 		if (unlikely(sk->sk_rx_dst != dst))
 			udp_sk_rx_dst_set(sk, dst);
 
-		ret = udp_queue_rcv_skb(sk, skb);
+		ret = udp_unicast_rcv_skb(sk, skb, uh);
 		sock_put(sk);
-		/* a return value > 0 means to resubmit the input, but
-		 * it wants the return to be -protocol, or 0
-		 */
-		if (ret > 0)
-			return -ret;
-		return 0;
+		return ret;
 	}
 
 	if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
@@ -1801,22 +1818,8 @@
 						saddr, daddr, udptable, proto);
 
 	sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
-	if (sk) {
-		int ret;
-
-		if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
-			skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
-						 inet_compute_pseudo);
-
-		ret = udp_queue_rcv_skb(sk, skb);
-
-		/* a return value > 0 means to resubmit the input, but
-		 * it wants the return to be -protocol, or 0
-		 */
-		if (ret > 0)
-			return -ret;
-		return 0;
-	}
+	if (sk)
+		return udp_unicast_rcv_skb(sk, skb, uh);
 
 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
 		goto drop;
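Factoring delivery into udp_unicast_rcv_skb() keeps the checksum conversion and the return-code translation in one place for both the early-demux path and the socket-lookup path. The translation itself is simple: the IP layer treats a positive return as "resubmit with protocol -ret", so anything positive from udp_queue_rcv_skb() is negated and everything else is reported as consumed. A one-function model of that convention:

/* Model of the return-code folding the wrapper centralizes. */
static int fold_udp_rcv_retval(int ret)
{
	return ret > 0 ? -ret : 0;
}
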
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 9077060..e838cf9 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -4113,7 +4113,6 @@
 				p++;
 				continue;
 			}
-			state->offset++;
 			return ifa;
 		}
 
@@ -4137,13 +4136,12 @@
 		return ifa;
 	}
 
+	state->offset = 0;
 	while (++state->bucket < IN6_ADDR_HSIZE) {
-		state->offset = 0;
 		hlist_for_each_entry_rcu_bh(ifa,
 				     &inet6_addr_lst[state->bucket], addr_lst) {
 			if (!net_eq(dev_net(ifa->idev->dev), net))
 				continue;
-			state->offset++;
 			return ifa;
 		}
 	}
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 3cdf4dc..7c539de 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -113,6 +113,7 @@
 			payload_len = skb->len - nhoff - sizeof(*ipv6h);
 		ipv6h->payload_len = htons(payload_len);
 		skb->network_header = (u8 *)ipv6h - skb->head;
+		skb_reset_mac_len(skb);
 
 		if (udpfrag) {
 			int err = ip6_find_1stfragopt(skb, &prevhdr);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index def627f..46f8e7c 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -210,12 +210,10 @@
 				kfree_skb(skb);
 				return -ENOBUFS;
 			}
+			if (skb->sk)
+				skb_set_owner_w(skb2, skb->sk);
 			consume_skb(skb);
 			skb = skb2;
-			/* skb_set_owner_w() changes sk->sk_wmem_alloc atomically,
-			 * it is safe to call in our context (socket lock not held)
-			 */
-			skb_set_owner_w(skb, (struct sock *)sk);
 		}
 		if (opt->opt_flen)
 			ipv6_push_frag_opts(skb, opt, &proto);
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 0149bfd..40d483e 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1226,7 +1226,7 @@
 ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ip6_tnl *t = netdev_priv(dev);
-	const struct iphdr  *iph = ip_hdr(skb);
+	const struct iphdr  *iph;
 	int encap_limit = -1;
 	struct flowi6 fl6;
 	__u8 dsfield;
@@ -1234,6 +1234,11 @@
 	u8 tproto;
 	int err;
 
+	/* ensure we can access the full inner ip header */
+	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
+		return -1;
+
+	iph = ip_hdr(skb);
 	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
 
 	tproto = ACCESS_ONCE(t->parms.proto);
@@ -1295,7 +1300,7 @@
 ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ip6_tnl *t = netdev_priv(dev);
-	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+	struct ipv6hdr *ipv6h;
 	int encap_limit = -1;
 	__u16 offset;
 	struct flowi6 fl6;
@@ -1304,6 +1309,10 @@
 	u8 tproto;
 	int err;
 
+	if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
+		return -1;
+
+	ipv6h = ipv6_hdr(skb);
 	tproto = ACCESS_ONCE(t->parms.proto);
 	if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
 	    ip6_tnl_addr_conflict(t, ipv6h))
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index ee33a67..b9147558 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -63,7 +63,6 @@
 static struct inet_frags nf_frags;
 
 #ifdef CONFIG_SYSCTL
-static int zero;
 
 static struct ctl_table nf_ct_frag6_sysctl_table[] = {
 	{
@@ -76,18 +75,17 @@
 	{
 		.procname	= "nf_conntrack_frag6_low_thresh",
 		.data		= &init_net.nf_frag.frags.low_thresh,
-		.maxlen		= sizeof(unsigned int),
+		.maxlen		= sizeof(unsigned long),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &zero,
+		.proc_handler	= proc_doulongvec_minmax,
 		.extra2		= &init_net.nf_frag.frags.high_thresh
 	},
 	{
 		.procname	= "nf_conntrack_frag6_high_thresh",
 		.data		= &init_net.nf_frag.frags.high_thresh,
-		.maxlen		= sizeof(unsigned int),
+		.maxlen		= sizeof(unsigned long),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec_minmax,
+		.proc_handler	= proc_doulongvec_minmax,
 		.extra1		= &init_net.nf_frag.frags.low_thresh
 	},
 	{ }
@@ -152,23 +150,6 @@
 	return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK);
 }
 
-static unsigned int nf_hash_frag(__be32 id, const struct in6_addr *saddr,
-				 const struct in6_addr *daddr)
-{
-	net_get_random_once(&nf_frags.rnd, sizeof(nf_frags.rnd));
-	return jhash_3words(ipv6_addr_hash(saddr), ipv6_addr_hash(daddr),
-			    (__force u32)id, nf_frags.rnd);
-}
-
-
-static unsigned int nf_hashfn(const struct inet_frag_queue *q)
-{
-	const struct frag_queue *nq;
-
-	nq = container_of(q, struct frag_queue, q);
-	return nf_hash_frag(nq->id, &nq->saddr, &nq->daddr);
-}
-
 static void nf_ct_frag6_expire(unsigned long data)
 {
 	struct frag_queue *fq;
@@ -177,34 +158,26 @@
 	fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
 	net = container_of(fq->q.net, struct net, nf_frag.frags);
 
-	ip6_expire_frag_queue(net, fq, &nf_frags);
+	ip6_expire_frag_queue(net, fq);
 }
 
 /* Creation primitives. */
-static inline struct frag_queue *fq_find(struct net *net, __be32 id,
-					 u32 user, struct in6_addr *src,
-					 struct in6_addr *dst, int iif, u8 ecn)
+static struct frag_queue *fq_find(struct net *net, __be32 id, u32 user,
+				  const struct ipv6hdr *hdr, int iif)
 {
+	struct frag_v6_compare_key key = {
+		.id = id,
+		.saddr = hdr->saddr,
+		.daddr = hdr->daddr,
+		.user = user,
+		.iif = iif,
+	};
 	struct inet_frag_queue *q;
-	struct ip6_create_arg arg;
-	unsigned int hash;
 
-	arg.id = id;
-	arg.user = user;
-	arg.src = src;
-	arg.dst = dst;
-	arg.iif = iif;
-	arg.ecn = ecn;
-
-	local_bh_disable();
-	hash = nf_hash_frag(id, src, dst);
-
-	q = inet_frag_find(&net->nf_frag.frags, &nf_frags, &arg, hash);
-	local_bh_enable();
-	if (IS_ERR_OR_NULL(q)) {
-		inet_frag_maybe_warn_overflow(q, pr_fmt());
+	q = inet_frag_find(&net->nf_frag.frags, &key);
+	if (!q)
 		return NULL;
-	}
+
 	return container_of(q, struct frag_queue, q);
 }
 
@@ -263,7 +236,7 @@
 			 * this case. -DaveM
 			 */
 			pr_debug("end of fragment not rounded to 8 bytes.\n");
-			inet_frag_kill(&fq->q, &nf_frags);
+			inet_frag_kill(&fq->q);
 			return -EPROTO;
 		}
 		if (end > fq->q.len) {
@@ -356,7 +329,7 @@
 	return 0;
 
 discard_fq:
-	inet_frag_kill(&fq->q, &nf_frags);
+	inet_frag_kill(&fq->q);
 err:
 	return -EINVAL;
 }
@@ -378,7 +351,7 @@
 	int    payload_len;
 	u8 ecn;
 
-	inet_frag_kill(&fq->q, &nf_frags);
+	inet_frag_kill(&fq->q);
 
 	WARN_ON(head == NULL);
 	WARN_ON(NFCT_FRAG6_CB(head)->offset != 0);
@@ -479,6 +452,7 @@
 		else if (head->ip_summed == CHECKSUM_COMPLETE)
 			head->csum = csum_add(head->csum, fp->csum);
 		head->truesize += fp->truesize;
+		fp->sk = NULL;
 	}
 	sub_frag_mem_limit(fq->q.net, head->truesize);
 
@@ -497,6 +471,7 @@
 					  head->csum);
 
 	fq->q.fragments = NULL;
+	fq->q.rb_fragments = RB_ROOT;
 	fq->q.fragments_tail = NULL;
 
 	return true;
@@ -591,9 +566,13 @@
 	hdr = ipv6_hdr(skb);
 	fhdr = (struct frag_hdr *)skb_transport_header(skb);
 
+	if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU &&
+	    fhdr->frag_off & htons(IP6_MF))
+		return -EINVAL;
+
 	skb_orphan(skb);
-	fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr,
-		     skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
+	fq = fq_find(net, fhdr->identification, user, hdr,
+		     skb->dev ? skb->dev->ifindex : 0);
 	if (fq == NULL) {
 		pr_debug("Can't find and can't create new queue\n");
 		return -ENOMEM;
@@ -623,25 +602,33 @@
 
 out_unlock:
 	spin_unlock_bh(&fq->q.lock);
-	inet_frag_put(&fq->q, &nf_frags);
+	inet_frag_put(&fq->q);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(nf_ct_frag6_gather);
 
 static int nf_ct_net_init(struct net *net)
 {
+	int res;
+
 	net->nf_frag.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
 	net->nf_frag.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
 	net->nf_frag.frags.timeout = IPV6_FRAG_TIMEOUT;
-	inet_frags_init_net(&net->nf_frag.frags);
+	net->nf_frag.frags.f = &nf_frags;
 
-	return nf_ct_frag6_sysctl_register(net);
+	res = inet_frags_init_net(&net->nf_frag.frags);
+	if (res < 0)
+		return res;
+	res = nf_ct_frag6_sysctl_register(net);
+	if (res < 0)
+		inet_frags_exit_net(&net->nf_frag.frags);
+	return res;
 }
 
 static void nf_ct_net_exit(struct net *net)
 {
 	nf_ct_frags6_sysctl_unregister(net);
-	inet_frags_exit_net(&net->nf_frag.frags, &nf_frags);
+	inet_frags_exit_net(&net->nf_frag.frags);
 }
 
 static struct pernet_operations nf_ct_net_ops = {
@@ -653,13 +640,12 @@
 {
 	int ret = 0;
 
-	nf_frags.hashfn = nf_hashfn;
 	nf_frags.constructor = ip6_frag_init;
 	nf_frags.destructor = NULL;
 	nf_frags.qsize = sizeof(struct frag_queue);
-	nf_frags.match = ip6_frag_match;
 	nf_frags.frag_expire = nf_ct_frag6_expire;
 	nf_frags.frags_cache_name = nf_frags_cache_name;
+	nf_frags.rhash_params = ip6_rhash_params;
 	ret = inet_frags_init(&nf_frags);
 	if (ret)
 		goto out;
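
The frag thresholds behind the nf_conntrack_frag6_{low,high}_thresh entries above (and the ip6frag_* entries later in this series) are now unsigned long fields, so the handlers move from proc_dointvec_minmax to proc_doulongvec_minmax and maxlen becomes sizeof(unsigned long). A standalone userspace sketch, assuming a 64-bit little-endian layout, of why the handler width has to track the field width:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* the threshold field is now an unsigned long (8 bytes on 64-bit) */
	unsigned long high_thresh = 0xdeadbeef00000000UL; /* stale upper half */
	int user_val = 4 * 1024 * 1024;                    /* 4 MiB from sysctl */

	/* an int-sized handler only stores the low 4 bytes (little-endian) */
	memcpy(&high_thresh, &user_val, sizeof(user_val));
	printf("int-width store:  %#lx\n", high_thresh);

	/* proc_doulongvec_minmax stores the full width */
	high_thresh = (unsigned long)user_val;
	printf("long-width store: %#lx\n", high_thresh);
	return 0;
}
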
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index e88bcb8..dc04c02 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -38,7 +38,6 @@
 static int sockstat6_seq_show(struct seq_file *seq, void *v)
 {
 	struct net *net = seq->private;
-	unsigned int frag_mem = ip6_frag_mem(net);
 
 	seq_printf(seq, "TCP6: inuse %d\n",
 		       sock_prot_inuse_get(net, &tcpv6_prot));
@@ -48,7 +47,9 @@
 			sock_prot_inuse_get(net, &udplitev6_prot));
 	seq_printf(seq, "RAW6: inuse %d\n",
 		       sock_prot_inuse_get(net, &rawv6_prot));
-	seq_printf(seq, "FRAG6: inuse %u memory %u\n", !!frag_mem, frag_mem);
+	seq_printf(seq, "FRAG6: inuse %u memory %lu\n",
+		   atomic_read(&net->ipv6.frags.rhashtable.nelems),
+		   frag_mem_limit(&net->ipv6.frags));
 	return 0;
 }
 
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index ff701ce..55d284a 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -645,8 +645,6 @@
 	skb->protocol = htons(ETH_P_IPV6);
 	skb->priority = sk->sk_priority;
 	skb->mark = sk->sk_mark;
-	skb_dst_set(skb, &rt->dst);
-	*dstp = NULL;
 
 	skb_put(skb, length);
 	skb_reset_network_header(skb);
@@ -656,8 +654,14 @@
 
 	skb->transport_header = skb->network_header;
 	err = memcpy_from_msg(iph, msg, length);
-	if (err)
-		goto error_fault;
+	if (err) {
+		err = -EFAULT;
+		kfree_skb(skb);
+		goto error;
+	}
+
+	skb_dst_set(skb, &rt->dst);
+	*dstp = NULL;
 
 	/* if egress device is enslaved to an L3 master device pass the
 	 * skb to its handler for processing
@@ -666,21 +670,28 @@
 	if (unlikely(!skb))
 		return 0;
 
+	/* Acquire rcu_read_lock() in case we need to use rt->rt6i_idev
+	 * in the error path. Since skb has been freed, the dst could
+	 * have been queued for deletion.
+	 */
+	rcu_read_lock();
 	IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
 	err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, skb,
 		      NULL, rt->dst.dev, dst_output);
 	if (err > 0)
 		err = net_xmit_errno(err);
-	if (err)
-		goto error;
+	if (err) {
+		IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
+		rcu_read_unlock();
+		goto error_check;
+	}
+	rcu_read_unlock();
 out:
 	return 0;
 
-error_fault:
-	err = -EFAULT;
-	kfree_skb(skb);
 error:
 	IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
+error_check:
 	if (err == -ENOBUFS && !np->recverr)
 		err = 0;
 	return err;
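
The rawv6_send_hdrinc() hunk above reorders things so the skb only takes ownership of the route (skb_dst_set() plus *dstp = NULL) after memcpy_from_msg() can no longer fail, and the later error path dereferences rt->rt6i_idev under rcu_read_lock(). A small userspace sketch of the same ownership ordering, with purely illustrative names:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct route { int refcount; };
struct packet { char *data; struct route *rt; };

/* copy the payload first; hand the route over only when nothing can fail */
static int build_packet(struct packet **out, struct route **rtp,
			const char *payload)
{
	struct packet *pkt = calloc(1, sizeof(*pkt));

	if (!pkt)
		return -ENOMEM;

	pkt->data = strdup(payload);	/* the step that may fail */
	if (!pkt->data) {
		free(pkt);		/* error path: caller still owns *rtp */
		return -EFAULT;
	}

	pkt->rt = *rtp;			/* transfer ownership last */
	*rtp = NULL;
	*out = pkt;
	return 0;
}

int main(void)
{
	struct route rt = { .refcount = 1 };
	struct route *rtp = &rt;
	struct packet *pkt = NULL;

	if (build_packet(&pkt, &rtp, "payload") == 0) {
		printf("built packet, route handed over: %s\n",
		       rtp ? "no" : "yes");
		free(pkt->data);
		free(pkt);
	}
	return 0;
}
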
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index e585c0a..74ffbcb 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -79,94 +79,58 @@
 static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 			  struct net_device *dev);
 
-/*
- * callers should be careful not to use the hash value outside the ipfrag_lock
- * as doing so could race with ipfrag_hash_rnd being recalculated.
- */
-static unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
-				    const struct in6_addr *daddr)
-{
-	net_get_random_once(&ip6_frags.rnd, sizeof(ip6_frags.rnd));
-	return jhash_3words(ipv6_addr_hash(saddr), ipv6_addr_hash(daddr),
-			    (__force u32)id, ip6_frags.rnd);
-}
-
-static unsigned int ip6_hashfn(const struct inet_frag_queue *q)
-{
-	const struct frag_queue *fq;
-
-	fq = container_of(q, struct frag_queue, q);
-	return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr);
-}
-
-bool ip6_frag_match(const struct inet_frag_queue *q, const void *a)
-{
-	const struct frag_queue *fq;
-	const struct ip6_create_arg *arg = a;
-
-	fq = container_of(q, struct frag_queue, q);
-	return	fq->id == arg->id &&
-		fq->user == arg->user &&
-		ipv6_addr_equal(&fq->saddr, arg->src) &&
-		ipv6_addr_equal(&fq->daddr, arg->dst) &&
-		(arg->iif == fq->iif ||
-		 !(ipv6_addr_type(arg->dst) & (IPV6_ADDR_MULTICAST |
-					       IPV6_ADDR_LINKLOCAL)));
-}
-EXPORT_SYMBOL(ip6_frag_match);
-
 void ip6_frag_init(struct inet_frag_queue *q, const void *a)
 {
 	struct frag_queue *fq = container_of(q, struct frag_queue, q);
-	const struct ip6_create_arg *arg = a;
+	const struct frag_v6_compare_key *key = a;
 
-	fq->id = arg->id;
-	fq->user = arg->user;
-	fq->saddr = *arg->src;
-	fq->daddr = *arg->dst;
-	fq->ecn = arg->ecn;
+	q->key.v6 = *key;
+	fq->ecn = 0;
 }
 EXPORT_SYMBOL(ip6_frag_init);
 
-void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
-			   struct inet_frags *frags)
+void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq)
 {
 	struct net_device *dev = NULL;
+	struct sk_buff *head;
 
+	rcu_read_lock();
 	spin_lock(&fq->q.lock);
 
 	if (fq->q.flags & INET_FRAG_COMPLETE)
 		goto out;
 
-	inet_frag_kill(&fq->q, frags);
+	inet_frag_kill(&fq->q);
 
-	rcu_read_lock();
 	dev = dev_get_by_index_rcu(net, fq->iif);
 	if (!dev)
-		goto out_rcu_unlock;
+		goto out;
 
 	__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
-
-	if (inet_frag_evicting(&fq->q))
-		goto out_rcu_unlock;
-
 	__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
 
 	/* Don't send error if the first segment did not arrive. */
-	if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !fq->q.fragments)
-		goto out_rcu_unlock;
+	head = fq->q.fragments;
+	if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !head)
+		goto out;
 
 	/* But use as source device on which LAST ARRIVED
 	 * segment was received. And do not use fq->dev
 	 * pointer directly, the device might have already disappeared.
 	 */
-	fq->q.fragments->dev = dev;
-	icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
-out_rcu_unlock:
-	rcu_read_unlock();
+	head->dev = dev;
+	skb_get(head);
+	spin_unlock(&fq->q.lock);
+
+	icmpv6_send(head, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
+	kfree_skb(head);
+	goto out_rcu_unlock;
+
 out:
 	spin_unlock(&fq->q.lock);
-	inet_frag_put(&fq->q, frags);
+out_rcu_unlock:
+	rcu_read_unlock();
+	inet_frag_put(&fq->q);
 }
 EXPORT_SYMBOL(ip6_expire_frag_queue);
 
@@ -178,31 +142,29 @@
 	fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
 	net = container_of(fq->q.net, struct net, ipv6.frags);
 
-	ip6_expire_frag_queue(net, fq, &ip6_frags);
+	ip6_expire_frag_queue(net, fq);
 }
 
 static struct frag_queue *
-fq_find(struct net *net, __be32 id, const struct in6_addr *src,
-	const struct in6_addr *dst, int iif, u8 ecn)
+fq_find(struct net *net, __be32 id, const struct ipv6hdr *hdr, int iif)
 {
+	struct frag_v6_compare_key key = {
+		.id = id,
+		.saddr = hdr->saddr,
+		.daddr = hdr->daddr,
+		.user = IP6_DEFRAG_LOCAL_DELIVER,
+		.iif = iif,
+	};
 	struct inet_frag_queue *q;
-	struct ip6_create_arg arg;
-	unsigned int hash;
 
-	arg.id = id;
-	arg.user = IP6_DEFRAG_LOCAL_DELIVER;
-	arg.src = src;
-	arg.dst = dst;
-	arg.iif = iif;
-	arg.ecn = ecn;
+	if (!(ipv6_addr_type(&hdr->daddr) & (IPV6_ADDR_MULTICAST |
+					    IPV6_ADDR_LINKLOCAL)))
+		key.iif = 0;
 
-	hash = inet6_hash_frag(id, src, dst);
-
-	q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
-	if (IS_ERR_OR_NULL(q)) {
-		inet_frag_maybe_warn_overflow(q, pr_fmt());
+	q = inet_frag_find(&net->ipv6.frags, &key);
+	if (!q)
 		return NULL;
-	}
+
 	return container_of(q, struct frag_queue, q);
 }
 
@@ -359,7 +321,7 @@
 	return -1;
 
 discard_fq:
-	inet_frag_kill(&fq->q, &ip6_frags);
+	inet_frag_kill(&fq->q);
 err:
 	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 			IPSTATS_MIB_REASMFAILS);
@@ -386,7 +348,7 @@
 	int sum_truesize;
 	u8 ecn;
 
-	inet_frag_kill(&fq->q, &ip6_frags);
+	inet_frag_kill(&fq->q);
 
 	ecn = ip_frag_ecn_table[fq->ecn];
 	if (unlikely(ecn == 0xff))
@@ -504,6 +466,7 @@
 	__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
 	rcu_read_unlock();
 	fq->q.fragments = NULL;
+	fq->q.rb_fragments = RB_ROOT;
 	fq->q.fragments_tail = NULL;
 	return 1;
 
@@ -525,6 +488,7 @@
 	struct frag_queue *fq;
 	const struct ipv6hdr *hdr = ipv6_hdr(skb);
 	struct net *net = dev_net(skb_dst(skb)->dev);
+	int iif;
 
 	if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED)
 		goto fail_hdr;
@@ -553,17 +517,22 @@
 		return 1;
 	}
 
-	fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr,
-		     skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
+	if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU &&
+	    fhdr->frag_off & htons(IP6_MF))
+		goto fail_hdr;
+
+	iif = skb->dev ? skb->dev->ifindex : 0;
+	fq = fq_find(net, fhdr->identification, hdr, iif);
 	if (fq) {
 		int ret;
 
 		spin_lock(&fq->q.lock);
 
+		fq->iif = iif;
 		ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);
 
 		spin_unlock(&fq->q.lock);
-		inet_frag_put(&fq->q, &ip6_frags);
+		inet_frag_put(&fq->q);
 		return ret;
 	}
 
@@ -584,24 +553,22 @@
 };
 
 #ifdef CONFIG_SYSCTL
-static int zero;
 
 static struct ctl_table ip6_frags_ns_ctl_table[] = {
 	{
 		.procname	= "ip6frag_high_thresh",
 		.data		= &init_net.ipv6.frags.high_thresh,
-		.maxlen		= sizeof(int),
+		.maxlen		= sizeof(unsigned long),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec_minmax,
+		.proc_handler	= proc_doulongvec_minmax,
 		.extra1		= &init_net.ipv6.frags.low_thresh
 	},
 	{
 		.procname	= "ip6frag_low_thresh",
 		.data		= &init_net.ipv6.frags.low_thresh,
-		.maxlen		= sizeof(int),
+		.maxlen		= sizeof(unsigned long),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &zero,
+		.proc_handler	= proc_doulongvec_minmax,
 		.extra2		= &init_net.ipv6.frags.high_thresh
 	},
 	{
@@ -644,10 +611,6 @@
 		table[1].data = &net->ipv6.frags.low_thresh;
 		table[1].extra2 = &net->ipv6.frags.high_thresh;
 		table[2].data = &net->ipv6.frags.timeout;
-
-		/* Don't export sysctls to unprivileged users */
-		if (net->user_ns != &init_user_ns)
-			table[0].procname = NULL;
 	}
 
 	hdr = register_net_sysctl(net, "net/ipv6", table);
@@ -709,19 +672,27 @@
 
 static int __net_init ipv6_frags_init_net(struct net *net)
 {
+	int res;
+
 	net->ipv6.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
 	net->ipv6.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
 	net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;
+	net->ipv6.frags.f = &ip6_frags;
 
-	inet_frags_init_net(&net->ipv6.frags);
+	res = inet_frags_init_net(&net->ipv6.frags);
+	if (res < 0)
+		return res;
 
-	return ip6_frags_ns_sysctl_register(net);
+	res = ip6_frags_ns_sysctl_register(net);
+	if (res < 0)
+		inet_frags_exit_net(&net->ipv6.frags);
+	return res;
 }
 
 static void __net_exit ipv6_frags_exit_net(struct net *net)
 {
 	ip6_frags_ns_sysctl_unregister(net);
-	inet_frags_exit_net(&net->ipv6.frags, &ip6_frags);
+	inet_frags_exit_net(&net->ipv6.frags);
 }
 
 static struct pernet_operations ip6_frags_ops = {
@@ -729,14 +700,55 @@
 	.exit = ipv6_frags_exit_net,
 };
 
+static u32 ip6_key_hashfn(const void *data, u32 len, u32 seed)
+{
+	return jhash2(data,
+		      sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
+}
+
+static u32 ip6_obj_hashfn(const void *data, u32 len, u32 seed)
+{
+	const struct inet_frag_queue *fq = data;
+
+	return jhash2((const u32 *)&fq->key.v6,
+		      sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
+}
+
+static int ip6_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
+{
+	const struct frag_v6_compare_key *key = arg->key;
+	const struct inet_frag_queue *fq = ptr;
+
+	return !!memcmp(&fq->key, key, sizeof(*key));
+}
+
+const struct rhashtable_params ip6_rhash_params = {
+	.head_offset		= offsetof(struct inet_frag_queue, node),
+	.hashfn			= ip6_key_hashfn,
+	.obj_hashfn		= ip6_obj_hashfn,
+	.obj_cmpfn		= ip6_obj_cmpfn,
+	.automatic_shrinking	= true,
+};
+EXPORT_SYMBOL(ip6_rhash_params);
+
 int __init ipv6_frag_init(void)
 {
 	int ret;
 
-	ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT);
+	ip6_frags.constructor = ip6_frag_init;
+	ip6_frags.destructor = NULL;
+	ip6_frags.qsize = sizeof(struct frag_queue);
+	ip6_frags.frag_expire = ip6_frag_expire;
+	ip6_frags.frags_cache_name = ip6_frag_cache_name;
+	ip6_frags.rhash_params = ip6_rhash_params;
+	ret = inet_frags_init(&ip6_frags);
 	if (ret)
 		goto out;
 
+	ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT);
+	if (ret)
+		goto err_protocol;
+
 	ret = ip6_frags_sysctl_register();
 	if (ret)
 		goto err_sysctl;
@@ -745,16 +757,6 @@
 	if (ret)
 		goto err_pernet;
 
-	ip6_frags.hashfn = ip6_hashfn;
-	ip6_frags.constructor = ip6_frag_init;
-	ip6_frags.destructor = NULL;
-	ip6_frags.qsize = sizeof(struct frag_queue);
-	ip6_frags.match = ip6_frag_match;
-	ip6_frags.frag_expire = ip6_frag_expire;
-	ip6_frags.frags_cache_name = ip6_frag_cache_name;
-	ret = inet_frags_init(&ip6_frags);
-	if (ret)
-		goto err_pernet;
 out:
 	return ret;
 
@@ -762,6 +764,8 @@
 	ip6_frags_sysctl_unregister();
 err_sysctl:
 	inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
+err_protocol:
+	inet_frags_fini(&ip6_frags);
 	goto out;
 }
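
After the rhashtable conversion above, fq_find() no longer computes a jhash bucket of its own: it fills a frag_v6_compare_key (id, saddr, daddr, user, iif, with iif zeroed unless the destination is multicast or link-local) and inet_frag_find() hashes and compares that key through ip6_rhash_params, i.e. jhash2 over the key words and a memcmp for equality. A standalone sketch of the key-based lookup, with demo types and a stand-in hash:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct in6_addr_demo { uint32_t s6_addr32[4]; };

/* illustrative layout; the kernel's frag_v6_compare_key is the real thing */
struct frag_key_demo {
	uint32_t		id;
	struct in6_addr_demo	saddr;
	struct in6_addr_demo	daddr;
	uint32_t		user;
	uint32_t		iif;
};

/* stand-in for jhash2(): fold the key one 32-bit word at a time */
static uint32_t hash_key(const struct frag_key_demo *key, uint32_t seed)
{
	uint32_t words[sizeof(*key) / sizeof(uint32_t)];
	uint32_t hash = seed;
	size_t i;

	memcpy(words, key, sizeof(words));
	for (i = 0; i < sizeof(words) / sizeof(words[0]); i++)
		hash = hash * 2654435761u ^ words[i];
	return hash;
}

int main(void)
{
	struct frag_key_demo a = { .id = 0x1234, .user = 6, .iif = 2 };
	struct frag_key_demo b = a;

	printf("same key:    same bucket %d, match %d\n",
	       hash_key(&a, 7) == hash_key(&b, 7), !memcmp(&a, &b, sizeof(a)));

	b.iif = 3;	/* only multicast/link-local traffic keeps a non-zero iif */
	printf("iif differs: match %d (separate queue)\n",
	       !memcmp(&a, &b, sizeof(a)));
	return 0;
}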
 
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index e63fd12..6ef9d32 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -386,7 +386,7 @@
 	case NL80211_IFTYPE_AP:
 	case NL80211_IFTYPE_AP_VLAN:
 		/* Keys without a station are used for TX only */
-		if (key->sta && test_sta_flag(key->sta, WLAN_STA_MFP))
+		if (sta && test_sta_flag(sta, WLAN_STA_MFP))
 			key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT;
 		break;
 	case NL80211_IFTYPE_ADHOC:
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index a5acaf1..0c0695e 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -948,8 +948,8 @@
 	if (len < IEEE80211_DEAUTH_FRAME_LEN)
 		return;
 
-	ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM BSSID=%pM (reason: %d)\n",
-		 mgmt->sa, mgmt->da, mgmt->bssid, reason);
+	ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
+	ibss_dbg(sdata, "\tBSSID=%pM (reason: %d)\n", mgmt->bssid, reason);
 	sta_info_destroy_addr(sdata, mgmt->sa);
 }
 
@@ -967,9 +967,9 @@
 	auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
 	auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
 
-	ibss_dbg(sdata,
-		 "RX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=%d)\n",
-		 mgmt->sa, mgmt->da, mgmt->bssid, auth_transaction);
+	ibss_dbg(sdata, "RX Auth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
+	ibss_dbg(sdata, "\tBSSID=%pM (auth_transaction=%d)\n",
+		 mgmt->bssid, auth_transaction);
 
 	if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1)
 		return;
@@ -1176,10 +1176,10 @@
 		rx_timestamp = drv_get_tsf(local, sdata);
 	}
 
-	ibss_dbg(sdata,
-		 "RX beacon SA=%pM BSSID=%pM TSF=0x%llx BCN=0x%llx diff=%lld @%lu\n",
+	ibss_dbg(sdata, "RX beacon SA=%pM BSSID=%pM TSF=0x%llx\n",
 		 mgmt->sa, mgmt->bssid,
-		 (unsigned long long)rx_timestamp,
+		 (unsigned long long)rx_timestamp);
+	ibss_dbg(sdata, "\tBCN=0x%llx diff=%lld @%lu\n",
 		 (unsigned long long)beacon_timestamp,
 		 (unsigned long long)(rx_timestamp - beacon_timestamp),
 		 jiffies);
@@ -1538,9 +1538,9 @@
 
 	tx_last_beacon = drv_tx_last_beacon(local);
 
-	ibss_dbg(sdata,
-		 "RX ProbeReq SA=%pM DA=%pM BSSID=%pM (tx_last_beacon=%d)\n",
-		 mgmt->sa, mgmt->da, mgmt->bssid, tx_last_beacon);
+	ibss_dbg(sdata, "RX ProbeReq SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
+	ibss_dbg(sdata, "\tBSSID=%pM (tx_last_beacon=%d)\n",
+		 mgmt->bssid, tx_last_beacon);
 
 	if (!tx_last_beacon && is_multicast_ether_addr(mgmt->da))
 		return;
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 2bb6899..e3bbfb2 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -254,8 +254,27 @@
 	     "%s called with hardware scan in progress\n", __func__);
 
 	rtnl_lock();
-	list_for_each_entry(sdata, &local->interfaces, list)
+	list_for_each_entry(sdata, &local->interfaces, list) {
+		/*
+		 * XXX: there may be more work for other vif types and even
+		 * for station mode: a good thing would be to run most of
+		 * the iface type's dependent _stop (ieee80211_mgd_stop,
+		 * ieee80211_ibss_stop) etc...
+		 * For now, fix only the specific bug that was seen: race
+		 * between csa_connection_drop_work and us.
+		 */
+		if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+			/*
+			 * This worker is scheduled from the iface worker that
+			 * runs on mac80211's workqueue, so we can't be
+			 * scheduling this worker after the cancel right here.
+			 * The exception is ieee80211_chswitch_done.
+			 * Then we can have a race...
+			 */
+			cancel_work_sync(&sdata->u.mgd.csa_connection_drop_work);
+		}
 		flush_delayed_work(&sdata->dec_tailroom_needed_wk);
+	}
 	ieee80211_scan_cancel(local);
 
 	/* make sure any new ROC will consider local->in_reconfig */
@@ -466,10 +485,7 @@
 		cpu_to_le32(IEEE80211_VHT_CAP_RXLDPC |
 			    IEEE80211_VHT_CAP_SHORT_GI_80 |
 			    IEEE80211_VHT_CAP_SHORT_GI_160 |
-			    IEEE80211_VHT_CAP_RXSTBC_1 |
-			    IEEE80211_VHT_CAP_RXSTBC_2 |
-			    IEEE80211_VHT_CAP_RXSTBC_3 |
-			    IEEE80211_VHT_CAP_RXSTBC_4 |
+			    IEEE80211_VHT_CAP_RXSTBC_MASK |
 			    IEEE80211_VHT_CAP_TXSTBC |
 			    IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
 			    IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
@@ -1164,6 +1180,7 @@
 #if IS_ENABLED(CONFIG_IPV6)
 	unregister_inet6addr_notifier(&local->ifa6_notifier);
 #endif
+	ieee80211_txq_teardown_flows(local);
 
 	rtnl_lock();
 
@@ -1191,7 +1208,6 @@
 	skb_queue_purge(&local->skb_queue);
 	skb_queue_purge(&local->skb_queue_unreliable);
 	skb_queue_purge(&local->skb_queue_tdls_chsw);
-	ieee80211_txq_teardown_flows(local);
 
 	destroy_workqueue(local->workqueue);
 	wiphy_unregister(local->hw.wiphy);
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index fed598a..b0acb29 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -563,6 +563,10 @@
 		forward = false;
 		reply = true;
 		target_metric = 0;
+
+		if (SN_GT(target_sn, ifmsh->sn))
+			ifmsh->sn = target_sn;
+
 		if (time_after(jiffies, ifmsh->last_sn_update +
 					net_traversal_jiffies(sdata)) ||
 		    time_before(jiffies, ifmsh->last_sn_update)) {
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 973adf3..70d289d 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -989,6 +989,10 @@
 	 */
 
 	if (sdata->reserved_chanctx) {
+		struct ieee80211_supported_band *sband = NULL;
+		struct sta_info *mgd_sta = NULL;
+		enum ieee80211_sta_rx_bandwidth bw = IEEE80211_STA_RX_BW_20;
+
 		/*
 		 * with multi-vif csa driver may call ieee80211_csa_finish()
 		 * many times while waiting for other interfaces to use their
@@ -997,6 +1001,48 @@
 		if (sdata->reserved_ready)
 			goto out;
 
+		if (sdata->vif.bss_conf.chandef.width !=
+		    sdata->csa_chandef.width) {
+			/*
+			 * For a managed interface, we also need to update the AP
+			 * station bandwidth and align the rate scale algorithm
+			 * on the bandwidth change. Here we only consider the
+			 * bandwidth of the new channel definition (as channel
+			 * switch flow does not have the full HT/VHT/HE
+			 * information), assuming that if additional changes are
+			 * required they would be done as part of the processing
+			 * of the next beacon from the AP.
+			 */
+			switch (sdata->csa_chandef.width) {
+			case NL80211_CHAN_WIDTH_20_NOHT:
+			case NL80211_CHAN_WIDTH_20:
+			default:
+				bw = IEEE80211_STA_RX_BW_20;
+				break;
+			case NL80211_CHAN_WIDTH_40:
+				bw = IEEE80211_STA_RX_BW_40;
+				break;
+			case NL80211_CHAN_WIDTH_80:
+				bw = IEEE80211_STA_RX_BW_80;
+				break;
+			case NL80211_CHAN_WIDTH_80P80:
+			case NL80211_CHAN_WIDTH_160:
+				bw = IEEE80211_STA_RX_BW_160;
+				break;
+			}
+
+			mgd_sta = sta_info_get(sdata, ifmgd->bssid);
+			sband =
+				local->hw.wiphy->bands[sdata->csa_chandef.chan->band];
+		}
+
+		if (sdata->vif.bss_conf.chandef.width >
+		    sdata->csa_chandef.width) {
+			mgd_sta->sta.bandwidth = bw;
+			rate_control_rate_update(local, sband, mgd_sta,
+						 IEEE80211_RC_BW_CHANGED);
+		}
+
 		ret = ieee80211_vif_use_reserved_context(sdata);
 		if (ret) {
 			sdata_info(sdata,
@@ -1007,6 +1053,13 @@
 			goto out;
 		}
 
+		if (sdata->vif.bss_conf.chandef.width <
+		    sdata->csa_chandef.width) {
+			mgd_sta->sta.bandwidth = bw;
+			rate_control_rate_update(local, sband, mgd_sta,
+						 IEEE80211_RC_BW_CHANGED);
+		}
+
 		goto out;
 	}
 
@@ -1229,6 +1282,16 @@
 					 cbss->beacon_interval));
 	return;
  drop_connection:
+	/*
+	 * This is just so that the disconnect flow will know that
+	 * we were trying to switch channel and failed. In case the
+	 * mode is 1 (we are not allowed to Tx), we will know not to
+	 * send a deauthentication frame. Those two fields will be
+	 * reset when the disconnection worker runs.
+	 */
+	sdata->vif.csa_active = true;
+	sdata->csa_block_tx = csa_ie.mode;
+
 	ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work);
 	mutex_unlock(&local->chanctx_mtx);
 	mutex_unlock(&local->mtx);
@@ -2401,6 +2464,7 @@
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 	u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
+	bool tx;
 
 	sdata_lock(sdata);
 	if (!ifmgd->associated) {
@@ -2408,6 +2472,8 @@
 		return;
 	}
 
+	tx = !sdata->csa_block_tx;
+
 	/* AP is probably out of range (or not reachable for another reason) so
 	 * remove the bss struct for that AP.
 	 */
@@ -2415,7 +2481,7 @@
 
 	ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
 			       WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
-			       true, frame_buf);
+			       tx, frame_buf);
 	mutex_lock(&local->mtx);
 	sdata->vif.csa_active = false;
 	ifmgd->csa_waiting_bcn = false;
@@ -2426,7 +2492,7 @@
 	}
 	mutex_unlock(&local->mtx);
 
-	ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true,
+	ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), tx,
 				    WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY);
 
 	sdata_unlock(sdata);
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index a123d0d..053ba86 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -787,7 +787,8 @@
 {
 	u32 addr_len;
 
-	if (info->attrs[NLBL_UNLABEL_A_IPV4ADDR]) {
+	if (info->attrs[NLBL_UNLABEL_A_IPV4ADDR] &&
+	    info->attrs[NLBL_UNLABEL_A_IPV4MASK]) {
 		addr_len = nla_len(info->attrs[NLBL_UNLABEL_A_IPV4ADDR]);
 		if (addr_len != sizeof(struct in_addr) &&
 		    addr_len != nla_len(info->attrs[NLBL_UNLABEL_A_IPV4MASK]))
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 13ff407..c934189 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -1310,7 +1310,7 @@
 					  u8 *op_class)
 {
 	u8 vht_opclass;
-	u16 freq = chandef->center_freq1;
+	u32 freq = chandef->center_freq1;
 
 	if (freq >= 2412 && freq <= 2472) {
 		if (chandef->width > NL80211_CHAN_WIDTH_40)
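
The one-character type change above exists because chandef->center_freq1 is a 32-bit MHz value and the 60 GHz band's channel centers overflow a u16. A short standalone demo of the truncation (the 66960 MHz figure is 802.11ad channel 5; treat it as illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t center_freq1 = 66960;	/* MHz, larger than 65535 */
	uint16_t truncated = (uint16_t)center_freq1;

	/* 66960 - 65536 = 1424, so a u16 copy would misclassify the channel */
	printf("u32: %u MHz -> u16: %u MHz\n", center_freq1, truncated);
	return 0;
}
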
diff --git a/sound/aoa/core/gpio-feature.c b/sound/aoa/core/gpio-feature.c
index 7196008..6555742 100644
--- a/sound/aoa/core/gpio-feature.c
+++ b/sound/aoa/core/gpio-feature.c
@@ -88,8 +88,10 @@
 	}
 
 	reg = of_get_property(np, "reg", NULL);
-	if (!reg)
+	if (!reg) {
+		of_node_put(np);
 		return NULL;
+	}
 
 	*gpioptr = *reg;
 
diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c
index 3469ac14..d0dfa82 100644
--- a/sound/firewire/bebob/bebob.c
+++ b/sound/firewire/bebob/bebob.c
@@ -263,6 +263,8 @@
 error:
 	mutex_unlock(&devices_mutex);
 	snd_bebob_stream_destroy_duplex(bebob);
+	kfree(bebob->maudio_special_quirk);
+	bebob->maudio_special_quirk = NULL;
 	snd_card_free(bebob->card);
 	dev_info(&bebob->unit->device,
 		 "Sound card registration failed: %d\n", err);
diff --git a/sound/firewire/bebob/bebob_maudio.c b/sound/firewire/bebob/bebob_maudio.c
index 07e5abd..6dbf047 100644
--- a/sound/firewire/bebob/bebob_maudio.c
+++ b/sound/firewire/bebob/bebob_maudio.c
@@ -96,17 +96,13 @@
 	struct fw_device *device = fw_parent_device(unit);
 	int err, rcode;
 	u64 date;
-	__le32 cues[3] = {
-		cpu_to_le32(MAUDIO_BOOTLOADER_CUE1),
-		cpu_to_le32(MAUDIO_BOOTLOADER_CUE2),
-		cpu_to_le32(MAUDIO_BOOTLOADER_CUE3)
-	};
+	__le32 *cues;
 
 	/* check date of software used to build */
 	err = snd_bebob_read_block(unit, INFO_OFFSET_SW_DATE,
 				   &date, sizeof(u64));
 	if (err < 0)
-		goto end;
+		return err;
 	/*
 	 * firmware version 5058 or later has date later than "20070401", but
 	 * 'date' is not null-terminated.
@@ -114,20 +110,28 @@
 	if (date < 0x3230303730343031LL) {
 		dev_err(&unit->device,
 			"Use firmware version 5058 or later\n");
-		err = -ENOSYS;
-		goto end;
+		return -ENXIO;
 	}
 
+	cues = kmalloc_array(3, sizeof(*cues), GFP_KERNEL);
+	if (!cues)
+		return -ENOMEM;
+
+	cues[0] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE1);
+	cues[1] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE2);
+	cues[2] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE3);
+
 	rcode = fw_run_transaction(device->card, TCODE_WRITE_BLOCK_REQUEST,
 				   device->node_id, device->generation,
 				   device->max_speed, BEBOB_ADDR_REG_REQ,
-				   cues, sizeof(cues));
+				   cues, 3 * sizeof(*cues));
+	kfree(cues);
 	if (rcode != RCODE_COMPLETE) {
 		dev_err(&unit->device,
 			"Failed to send a cue to load firmware\n");
 		err = -EIO;
 	}
-end:
+
 	return err;
 }
 
@@ -290,10 +294,6 @@
 		bebob->midi_output_ports = 2;
 	}
 end:
-	if (err < 0) {
-		kfree(params);
-		bebob->maudio_special_quirk = NULL;
-	}
 	mutex_unlock(&bebob->mutex);
 	return err;
 }
diff --git a/sound/firewire/digi00x/digi00x.c b/sound/firewire/digi00x/digi00x.c
index 1f5e1d2..ef68999 100644
--- a/sound/firewire/digi00x/digi00x.c
+++ b/sound/firewire/digi00x/digi00x.c
@@ -49,6 +49,7 @@
 	fw_unit_put(dg00x->unit);
 
 	mutex_destroy(&dg00x->mutex);
+	kfree(dg00x);
 }
 
 static void dg00x_card_free(struct snd_card *card)
diff --git a/sound/firewire/fireworks/fireworks.c b/sound/firewire/fireworks/fireworks.c
index 71a0613..f2d0733 100644
--- a/sound/firewire/fireworks/fireworks.c
+++ b/sound/firewire/fireworks/fireworks.c
@@ -301,6 +301,8 @@
 	snd_efw_transaction_remove_instance(efw);
 	snd_efw_stream_destroy_duplex(efw);
 	snd_card_free(efw->card);
+	kfree(efw->resp_buf);
+	efw->resp_buf = NULL;
 	dev_info(&efw->unit->device,
 		 "Sound card registration failed: %d\n", err);
 }
diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c
index 474b06d..696b6cf 100644
--- a/sound/firewire/oxfw/oxfw.c
+++ b/sound/firewire/oxfw/oxfw.c
@@ -135,6 +135,7 @@
 
 	kfree(oxfw->spec);
 	mutex_destroy(&oxfw->mutex);
+	kfree(oxfw);
 }
 
 /*
@@ -212,6 +213,7 @@
 static void do_registration(struct work_struct *work)
 {
 	struct snd_oxfw *oxfw = container_of(work, struct snd_oxfw, dwork.work);
+	int i;
 	int err;
 
 	if (oxfw->registered)
@@ -274,7 +276,15 @@
 	snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->rx_stream);
 	if (oxfw->has_output)
 		snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->tx_stream);
+	for (i = 0; i < SND_OXFW_STREAM_FORMAT_ENTRIES; ++i) {
+		kfree(oxfw->tx_stream_formats[i]);
+		oxfw->tx_stream_formats[i] = NULL;
+		kfree(oxfw->rx_stream_formats[i]);
+		oxfw->rx_stream_formats[i] = NULL;
+	}
 	snd_card_free(oxfw->card);
+	kfree(oxfw->spec);
+	oxfw->spec = NULL;
 	dev_info(&oxfw->unit->device,
 		 "Sound card registration failed: %d\n", err);
 }
diff --git a/sound/firewire/tascam/tascam.c b/sound/firewire/tascam/tascam.c
index 9dc93a7..4c967ac 100644
--- a/sound/firewire/tascam/tascam.c
+++ b/sound/firewire/tascam/tascam.c
@@ -93,6 +93,7 @@
 	fw_unit_put(tscm->unit);
 
 	mutex_destroy(&tscm->mutex);
+	kfree(tscm);
 }
 
 static void tscm_card_free(struct snd_card *card)
diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c
index 8761877..00c6af2 100644
--- a/sound/hda/hdac_controller.c
+++ b/sound/hda/hdac_controller.c
@@ -40,6 +40,8 @@
  */
 void snd_hdac_bus_init_cmd_io(struct hdac_bus *bus)
 {
+	WARN_ON_ONCE(!bus->rb.area);
+
 	spin_lock_irq(&bus->reg_lock);
 	/* CORB set up */
 	bus->corb.addr = bus->rb.addr;
@@ -478,13 +480,15 @@
 	/* reset controller */
 	azx_reset(bus, full_reset);
 
-	/* initialize interrupts */
+	/* clear interrupts */
 	azx_int_clear(bus);
-	azx_int_enable(bus);
 
 	/* initialize the codec command I/O */
 	snd_hdac_bus_init_cmd_io(bus);
 
+	/* enable interrupts after CORB/RIRB buffers are initialized above */
+	azx_int_enable(bus);
+
 	/* program the position buffer */
 	if (bus->use_posbuf && bus->posbuf.addr) {
 		snd_hdac_chip_writel(bus, DPLBASE, (u32)bus->posbuf.addr);
diff --git a/sound/pci/emu10k1/emufx.c b/sound/pci/emu10k1/emufx.c
index 56fc47b..50b216f 100644
--- a/sound/pci/emu10k1/emufx.c
+++ b/sound/pci/emu10k1/emufx.c
@@ -2520,7 +2520,7 @@
 		emu->support_tlv = 1;
 		return put_user(SNDRV_EMU10K1_VERSION, (int __user *)argp);
 	case SNDRV_EMU10K1_IOCTL_INFO:
-		info = kmalloc(sizeof(*info), GFP_KERNEL);
+		info = kzalloc(sizeof(*info), GFP_KERNEL);
 		if (!info)
 			return -ENOMEM;
 		snd_emu10k1_fx8010_info(emu, info);
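
The kzalloc() switch above matters because the info structure is later copied out to userspace in full; any field or padding that snd_emu10k1_fx8010_info() does not write would otherwise leak old heap contents. A userspace sketch of the difference, with illustrative names (the stale-byte value depends on the allocator, so treat the exact output as a typical glibc result):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fx_info { int version; char name[32]; };	/* tail often left untouched */

static void fill_info(struct fx_info *info)
{
	info->version = 1;
	strcpy(info->name, "emu10k1");	/* bytes after the NUL keep old contents */
}

int main(void)
{
	/* dirty a heap chunk so stale contents are visible when it is reused */
	void *scratch = malloc(sizeof(struct fx_info));
	memset(scratch, 0xAA, sizeof(struct fx_info));
	free(scratch);

	struct fx_info *leaky = malloc(sizeof(*leaky));		/* kmalloc analogue */
	struct fx_info *clean = calloc(1, sizeof(*clean));	/* kzalloc analogue */

	fill_info(leaky);
	fill_info(clean);
	printf("byte 20 of name: %#x (malloc) vs %#x (calloc)\n",
	       (unsigned char)leaky->name[20], (unsigned char)clean->name[20]);

	free(leaky);
	free(clean);
	return 0;
}
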
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 4e331dd..f913809 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2349,7 +2349,8 @@
 	  .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
 	/* AMD Raven */
 	{ PCI_DEVICE(0x1022, 0x15e3),
-	  .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
+	  .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB |
+			 AZX_DCAPS_PM_RUNTIME },
 	/* ATI HDMI */
 	{ PCI_DEVICE(0x1002, 0x0002),
 	  .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index f03a143..ca29457 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5698,6 +5698,7 @@
 	SND_PCI_QUIRK(0x1028, 0x0706, "Dell Inspiron 7559", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
 	SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
 	SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+	SND_PCI_QUIRK(0x1028, 0x075c, "Dell XPS 27 7760", ALC298_FIXUP_SPK_VOLUME),
 	SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
 	SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3),
 	SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
diff --git a/sound/soc/codecs/cs4265.c b/sound/soc/codecs/cs4265.c
index fd966bb..6e8eb1f 100644
--- a/sound/soc/codecs/cs4265.c
+++ b/sound/soc/codecs/cs4265.c
@@ -157,8 +157,8 @@
 	SOC_SINGLE("Validity Bit Control Switch", CS4265_SPDIF_CTL2,
 				3, 1, 0),
 	SOC_ENUM("SPDIF Mono/Stereo", spdif_mono_stereo_enum),
-	SOC_SINGLE("MMTLR Data Switch", 0,
-				1, 1, 0),
+	SOC_SINGLE("MMTLR Data Switch", CS4265_SPDIF_CTL2,
+				0, 1, 0),
 	SOC_ENUM("Mono Channel Select", spdif_mono_select_enum),
 	SND_SOC_BYTES("C Data Buffer", CS4265_C_DATA_BUFF, 24),
 };
diff --git a/sound/soc/codecs/sigmadsp.c b/sound/soc/codecs/sigmadsp.c
index d53680a..6df1586 100644
--- a/sound/soc/codecs/sigmadsp.c
+++ b/sound/soc/codecs/sigmadsp.c
@@ -117,8 +117,7 @@
 	struct sigmadsp_control *ctrl, void *data)
 {
 	/* safeload loads up to 20 bytes in an atomic operation */
-	if (ctrl->num_bytes > 4 && ctrl->num_bytes <= 20 && sigmadsp->ops &&
-	    sigmadsp->ops->safeload)
+	if (ctrl->num_bytes <= 20 && sigmadsp->ops && sigmadsp->ops->safeload)
 		return sigmadsp->ops->safeload(sigmadsp, ctrl->addr, data,
 			ctrl->num_bytes);
 	else
diff --git a/sound/soc/codecs/wm8804-i2c.c b/sound/soc/codecs/wm8804-i2c.c
index f27464c..7954196 100644
--- a/sound/soc/codecs/wm8804-i2c.c
+++ b/sound/soc/codecs/wm8804-i2c.c
@@ -13,6 +13,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/i2c.h>
+#include <linux/acpi.h>
 
 #include "wm8804.h"
 
@@ -40,17 +41,29 @@
 };
 MODULE_DEVICE_TABLE(i2c, wm8804_i2c_id);
 
+#if defined(CONFIG_OF)
 static const struct of_device_id wm8804_of_match[] = {
 	{ .compatible = "wlf,wm8804", },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, wm8804_of_match);
+#endif
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id wm8804_acpi_match[] = {
+	{ "1AEC8804", 0 }, /* Wolfson PCI ID + part ID */
+	{ "10138804", 0 }, /* Cirrus Logic PCI ID + part ID */
+	{ },
+};
+MODULE_DEVICE_TABLE(acpi, wm8804_acpi_match);
+#endif
 
 static struct i2c_driver wm8804_i2c_driver = {
 	.driver = {
 		.name = "wm8804",
 		.pm = &wm8804_pm,
-		.of_match_table = wm8804_of_match,
+		.of_match_table = of_match_ptr(wm8804_of_match),
+		.acpi_match_table = ACPI_PTR(wm8804_acpi_match),
 	},
 	.probe = wm8804_i2c_probe,
 	.remove = wm8804_i2c_remove,
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 682c207..b36c856 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -3929,6 +3929,13 @@
 			continue;
 		}
 
+		/* let users know there is no DAI to link */
+		if (!dai_w->priv) {
+			dev_dbg(card->dev, "dai widget %s has no DAI\n",
+				dai_w->name);
+			continue;
+		}
+
 		dai = dai_w->priv;
 
 		/* ...find all widgets with the same stream and link them */
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index fbc1474..f6d1bc9 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -104,7 +104,6 @@
 #define X86_FEATURE_EXTD_APICID	( 3*32+26) /* has extended APICID (8 bits) */
 #define X86_FEATURE_AMD_DCM     ( 3*32+27) /* multi-node processor */
 #define X86_FEATURE_APERFMPERF	( 3*32+28) /* APERFMPERF */
-/* free, was #define X86_FEATURE_EAGER_FPU	( 3*32+29) * "eagerfpu" Non lazy FPU restore */
 #define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
diff --git a/tools/perf/arch/powerpc/util/sym-handling.c b/tools/perf/arch/powerpc/util/sym-handling.c
index de477a3..01a288c 100644
--- a/tools/perf/arch/powerpc/util/sym-handling.c
+++ b/tools/perf/arch/powerpc/util/sym-handling.c
@@ -21,15 +21,16 @@
 
 #endif
 
-#if !defined(_CALL_ELF) || _CALL_ELF != 2
 int arch__choose_best_symbol(struct symbol *syma,
 			     struct symbol *symb __maybe_unused)
 {
 	char *sym = syma->name;
 
+#if !defined(_CALL_ELF) || _CALL_ELF != 2
 	/* Skip over any initial dot */
 	if (*sym == '.')
 		sym++;
+#endif
 
 	/* Avoid "SyS" kernel syscall aliases */
 	if (strlen(sym) >= 3 && !strncmp(sym, "SyS", 3))
@@ -40,6 +41,7 @@
 	return SYMBOL_A;
 }
 
+#if !defined(_CALL_ELF) || _CALL_ELF != 2
 /* Allow matching against dot variants */
 int arch__compare_symbol_names(const char *namea, const char *nameb)
 {
diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py
index 7656ff8..c001d5a 100644
--- a/tools/perf/scripts/python/export-to-postgresql.py
+++ b/tools/perf/scripts/python/export-to-postgresql.py
@@ -204,14 +204,23 @@
 libpq = CDLL("libpq.so.5")
 PQconnectdb = libpq.PQconnectdb
 PQconnectdb.restype = c_void_p
+PQconnectdb.argtypes = [ c_char_p ]
 PQfinish = libpq.PQfinish
+PQfinish.argtypes = [ c_void_p ]
 PQstatus = libpq.PQstatus
+PQstatus.restype = c_int
+PQstatus.argtypes = [ c_void_p ]
 PQexec = libpq.PQexec
 PQexec.restype = c_void_p
+PQexec.argtypes = [ c_void_p, c_char_p ]
 PQresultStatus = libpq.PQresultStatus
+PQresultStatus.restype = c_int
+PQresultStatus.argtypes = [ c_void_p ]
 PQputCopyData = libpq.PQputCopyData
+PQputCopyData.restype = c_int
 PQputCopyData.argtypes = [ c_void_p, c_void_p, c_int ]
 PQputCopyEnd = libpq.PQputCopyEnd
+PQputCopyEnd.restype = c_int
 PQputCopyEnd.argtypes = [ c_void_p, c_void_p ]
 
 sys.path.append(os.environ['PERF_EXEC_PATH'] + \
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index f55d108..3be8c48 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -241,8 +241,9 @@
 {
 	struct perf_evsel *evsel = zalloc(perf_evsel__object.size);
 
-	if (evsel != NULL)
-		perf_evsel__init(evsel, attr, idx);
+	if (!evsel)
+		return NULL;
+	perf_evsel__init(evsel, attr, idx);
 
 	if (perf_evsel__is_bpf_output(evsel)) {
 		evsel->attr.sample_type |= (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
diff --git a/tools/testing/selftests/efivarfs/config b/tools/testing/selftests/efivarfs/config
new file mode 100644
index 0000000..4e151f1
--- /dev/null
+++ b/tools/testing/selftests/efivarfs/config
@@ -0,0 +1 @@
+CONFIG_EFIVAR_FS=y
diff --git a/tools/testing/selftests/memory-hotplug/config b/tools/testing/selftests/memory-hotplug/config
index 2fde301..a7e8cd5 100644
--- a/tools/testing/selftests/memory-hotplug/config
+++ b/tools/testing/selftests/memory-hotplug/config
@@ -2,3 +2,4 @@
 CONFIG_MEMORY_HOTPLUG_SPARSE=y
 CONFIG_NOTIFIER_ERROR_INJECTION=y
 CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
+CONFIG_MEMORY_HOTREMOVE=y
diff --git a/tools/vm/page-types.c b/tools/vm/page-types.c
index e92903f..6d5bcba 100644
--- a/tools/vm/page-types.c
+++ b/tools/vm/page-types.c
@@ -155,12 +155,6 @@
 };
 
 
-static const char * const debugfs_known_mountpoints[] = {
-	"/sys/kernel/debug",
-	"/debug",
-	0,
-};
-
 /*
  * data structures
  */
diff --git a/tools/vm/slabinfo.c b/tools/vm/slabinfo.c
index b9d34b3..6975ec4 100644
--- a/tools/vm/slabinfo.c
+++ b/tools/vm/slabinfo.c
@@ -29,8 +29,8 @@
 	int alias;
 	int refs;
 	int aliases, align, cache_dma, cpu_slabs, destroy_by_rcu;
-	int hwcache_align, object_size, objs_per_slab;
-	int sanity_checks, slab_size, store_user, trace;
+	unsigned int hwcache_align, object_size, objs_per_slab;
+	unsigned int sanity_checks, slab_size, store_user, trace;
 	int order, poison, reclaim_account, red_zone;
 	unsigned long partial, objects, slabs, objects_partial, objects_total;
 	unsigned long alloc_fastpath, alloc_slowpath;