Merge "ARM: dts: msm: Add LLCC, DDR opp tables of devfreq BW devices for kona"
diff --git a/Documentation/devicetree/bindings/arm/msm/memory-offline.txt b/Documentation/devicetree/bindings/arm/msm/memory-offline.txt
index 96c64d7..f57242b 100644
--- a/Documentation/devicetree/bindings/arm/msm/memory-offline.txt
+++ b/Documentation/devicetree/bindings/arm/msm/memory-offline.txt
@@ -16,9 +16,28 @@
 Required properties:
 - compatible: "qcom,mem-offline"
 - granule: The minimum granule size in mega-bytes for memory onlining/offlining.
-- mem-percent: Percentage of the DDR which will support being onlined/offlined.
-	The system will round down the value to align with the minimum offlinable
-	granule size supported by DDR.
+- offline-sizes: Array of offlinable memory region sizes to apply to targets
+	based on their DDR size.
+
+	Each entry in the array is a pair of sizes, where the first size in the
+	pair is the minimum amount of DDR required in the system in bytes, and
+	the second item in the pair is the size of the offlinable region in
+	bytes which will be applied to the system.
+
+	The offlinable memory region size is taken from the entry whose minimum
+	required DDR amount is closest to, but not greater than, the amount of
+	DDR in the system.
+	If no entry has a minimum required DDR amount that is less than the
+	amount of DDR in the system, then no offlinable region will be created.
+
+	For example, in the following configuration:
+              offline-sizes = <0x1 0x40000000 0x0 0x40000000>,
+			      <0x1 0xc0000000 0x0 0x80000000>;
+	On a 4GB target no offlinable region will be created.
+	On a 6GB target a 1GB offlinable region will be created.
+	On an 8GB target a 2GB offlinable region will be created.
+	On a 12GB target a 2GB offlinable region will be created.
 - mboxes: Reference to the mailbox used by the driver to make requests to
 	online/offline memory.
 
@@ -26,6 +45,7 @@
   mem-offline {
 	compatible = "qcom,mem-offline";
 	granule = <512>;
-	mem-percent = "35";
+	offline-sizes = <0x1 0x40000000 0x0 0x40000000>,
+                        <0x1 0xc0000000 0x0 0x80000000>;
 	mboxes = <&qmp_aop 0>;
   };
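
For clarity when reading the example above: with two address cells and two size
cells at the root, each 64-bit value in an offline-sizes pair spans two 32-bit
cells. The following small, hypothetical C snippet (not part of the binding or
of this kernel change) shows how the first example entry decodes:

#include <stdint.h>
#include <stdio.h>

/* Combine two 32-bit DT cells into one 64-bit value, as used for the
 * offline-sizes pairs when #address-cells and #size-cells are both 2.
 */
static uint64_t cells_to_u64(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	/* First example entry: <0x1 0x40000000  0x0 0x40000000> */
	uint64_t min_ddr = cells_to_u64(0x1, 0x40000000); /* 5 GiB threshold */
	uint64_t offline = cells_to_u64(0x0, 0x40000000); /* 1 GiB offlinable */

	printf("min DDR %llu MiB, offline %llu MiB\n",
	       (unsigned long long)(min_ddr >> 20),
	       (unsigned long long)(offline >> 20));
	return 0;
}
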
diff --git a/Documentation/devicetree/bindings/clock/qcom,npucc.txt b/Documentation/devicetree/bindings/clock/qcom,npucc.txt
new file mode 100644
index 0000000..50977a0
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,npucc.txt
@@ -0,0 +1,26 @@
+Qualcomm Technologies, Inc. NPU Clock & Reset Controller Bindings
+-----------------------------------------------------------------
+
+Required properties :
+- compatible:		Should be "qcom,npucc-kona".
+- reg:			Shall contain base register addresses and sizes.
+- reg-names:		Names of the register bases listed in the same order as
+			in the reg property.  Shall include: "cc", "qdsp6ss",
+			and "qdsp6ss_pll".
+- vdd_cx-supply:	Phandle of the VDD_CX regulator supply rail that needs
+			to be voted on behalf of the NPU CC clocks.
+- #clock-cells:		Shall contain 1.
+- #reset-cells:		Shall contain 1.
+
+Example:
+
+clock_npucc: qcom,npucc@9980000 {
+	compatible = "qcom,npucc-kona";
+	reg = <0x9980000 0x10000>,
+		<0x9800000 0x10000>,
+		<0x9810000 0x10000>;
+	reg-names = "cc", "qdsp6ss", "qdsp6ss_pll";
+	vdd_cx-supply = <&VDD_CX_LEVEL>;
+	#clock-cells = <1>;
+	#reset-cells = <1>;
+};
diff --git a/Documentation/devicetree/bindings/clock/qcom,rpmh-clk.txt b/Documentation/devicetree/bindings/clock/qcom,rpmh-clk.txt
index 3c00765..4f91bba 100644
--- a/Documentation/devicetree/bindings/clock/qcom,rpmh-clk.txt
+++ b/Documentation/devicetree/bindings/clock/qcom,rpmh-clk.txt
@@ -6,7 +6,9 @@
 other hardware subsystems via RSC to control clocks.
 
 Required properties :
-- compatible : shall contain "qcom,sdm845-rpmh-clk"
+- compatible : Shall contain one of the following:
+		"qcom,kona-rpmh-clk",
+		"qcom,sdm845-rpmh-clk"
 
 - #clock-cells : must contain 1
 
@@ -20,3 +22,10 @@
 			#clock-cells = <1>;
 		};
 	};
+
+	&apps_rsc {
+		rpmhcc: clock-controller {
+			compatible = "qcom,kona-rpmh-clk";
+			#clock-cells = <1>;
+		};
+	};
diff --git a/Documentation/devicetree/bindings/display/msm/sde.txt b/Documentation/devicetree/bindings/display/msm/sde.txt
index b1b62ef..009f83f 100644
--- a/Documentation/devicetree/bindings/display/msm/sde.txt
+++ b/Documentation/devicetree/bindings/display/msm/sde.txt
@@ -302,6 +302,8 @@
 - qcom,sde-reg-dma-version:	Version of the reg dma hardware block.
 - qcom,sde-reg-dma-trigger-off: Offset of the lut dma trigger reg from "mdp_phys"
 				defined in reg property.
+- qcom,sde-reg-dma-broadcast-disabled: Boolean property to indicate if broadcast
+				functionality in the register dma hardware block should not be used.
 - qcom,sde-dram-channels:	This represents the number of channels in the
 				Bus memory controller.
 - qcom,sde-num-nrt-paths:	Integer property represents the number of non-realtime
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 33b8aad..ef92e2d 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -934,6 +934,13 @@
 	help
 	  Say Y if you want to run Linux in a Virtual Machine on Xen on ARM64.
 
+config OKL4_GUEST
+	bool "OKL4 Hypervisor guest support"
+	depends on ARM64 && OF
+	default n
+	help
+	  Say Y if you want to run Linux as a guest of the OKL4 hypervisor.
+
 config FORCE_MAX_ZONEORDER
 	int
 	default "14" if (ARM64_64K_PAGES && TRANSPARENT_HUGEPAGE)
diff --git a/arch/arm64/boot/dts/qcom/kona.dtsi b/arch/arm64/boot/dts/qcom/kona.dtsi
index aeb0683..83db559 100644
--- a/arch/arm64/boot/dts/qcom/kona.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona.dtsi
@@ -403,7 +403,7 @@
 			reg-names = "freq-domain0", "freq-domain1",
 				"freq-domain2";
 
-			clocks = <&clock_xo>, <&clock_gcc GPLL0>;
+			clocks = <&clock_rpmh RPMH_CXO_CLK>, <&clock_gcc GPLL0>;
 			clock-names = "xo", "cpu_clk";
 
 			#freq-domain-cells = <2>;
@@ -577,6 +577,13 @@
 			linux,cma-default;
 		};
 	};
+
+	vendor: vendor {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges = <0 0 0 0xffffffff>;
+		compatible = "simple-bus";
+	};
 };
 
 &soc {
@@ -927,14 +934,14 @@
 		interrupt-controller;
 	};
 
-	clock_xo: bi_tcxo {
-		compatible = "fixed-clock";
-		#clock-cells = <0>;
-		clock-frequency = <19200000>;
-		clock-output-names = "bi_tcxo";
-	};
-
 	clocks {
+		xo_board: xo-board {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <38400000>;
+			clock-output-names = "xo_board";
+		};
+
 		sleep_clk: sleep-clk {
 			compatible = "fixed-clock";
 			clock-frequency = <32000>;
@@ -943,12 +950,6 @@
 		};
 	};
 
-	clock_rpmh: qcom,rpmhclk {
-		compatible = "qcom,dummycc";
-		clock-output-names = "rpmh_clocks";
-		#clock-cells = <1>;
-	};
-
 	clock_aop: qcom,aopclk {
 		compatible = "qcom,dummycc";
 		clock-output-names = "qdss_clocks";
@@ -966,9 +967,13 @@
 		#reset-cells = <1>;
 	};
 
-	clock_npucc: qcom,npucc {
-		compatible = "qcom,dummycc";
-		clock-output-names = "npucc_clocks";
+	clock_npucc: qcom,npucc@9980000 {
+		compatible = "qcom,npucc-kona", "syscon";
+		reg = <0x9980000 0x10000>,
+			<0x9800000 0x10000>,
+			<0x9810000 0x10000>;
+		reg-names = "cc", "qdsp6ss", "qdsp6ss_pll";
+		vdd_cx-supply = <&VDD_CX_LEVEL>;
 		#clock-cells = <1>;
 		#reset-cells = <1>;
 	};
@@ -1031,8 +1036,9 @@
 		qcom,dispcc = <&clock_dispcc>;
 		qcom,camcc = <&clock_camcc>;
 		qcom,gpucc = <&clock_gpucc>;
+		qcom,npucc = <&clock_npucc>;
 		clock-names = "xo_clk_src";
-		clocks = <&clock_xo>;
+		clocks = <&clock_rpmh RPMH_CXO_CLK>;
 		#clock-cells = <1>;
 	};
 
@@ -1464,6 +1470,11 @@
 		system_pm {
 			compatible = "qcom,system-pm";
 		};
+
+		clock_rpmh: qcom,rpmhclk {
+			compatible = "qcom,kona-rpmh-clk";
+			#clock-cells = <1>;
+		};
 	};
 
 	disp_rsc: rsc@af20000 {
@@ -1980,7 +1991,7 @@
 		compatible = "qcom,pil-tz-generic";
 		reg = <0xabb0000 0x2000>;
 		status = "ok";
-		qcom,pas-id = <25>;
+		qcom,pas-id = <26>;
 		qcom,firmware-name = "cvpss";
 
 		memory-region = <&pil_cvp_mem>;
diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-kona.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-kona.dtsi
index 48b8968..405edd8 100644
--- a/arch/arm64/boot/dts/qcom/msm-arm-smmu-kona.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-kona.dtsi
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <dt-bindings/interrupt-controller/arm-gic.h>
@@ -21,16 +21,16 @@
 		ranges;
 		qcom,regulator-names = "vdd";
 		vdd-supply = <&gpu_cx_gdsc>;
-		interrupts =	<GIC_SPI 673 IRQ_TYPE_LEVEL_HIGH>,
-				<GIC_SPI 674 IRQ_TYPE_LEVEL_HIGH>,
+		interrupts =	<GIC_SPI 672 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 673 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 678 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 679 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 680 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 681 IRQ_TYPE_LEVEL_HIGH>,
 				<GIC_SPI 682 IRQ_TYPE_LEVEL_HIGH>,
 				<GIC_SPI 683 IRQ_TYPE_LEVEL_HIGH>,
 				<GIC_SPI 684 IRQ_TYPE_LEVEL_HIGH>,
-				<GIC_SPI 685 IRQ_TYPE_LEVEL_HIGH>,
-				<GIC_SPI 686 IRQ_TYPE_LEVEL_HIGH>,
-				<GIC_SPI 687 IRQ_TYPE_LEVEL_HIGH>,
-				<GIC_SPI 688 IRQ_TYPE_LEVEL_HIGH>,
-				<GIC_SPI 689 IRQ_TYPE_LEVEL_HIGH>;
+				<GIC_SPI 685 IRQ_TYPE_LEVEL_HIGH>;
 
 		gfx_0_tbu: gfx_0_tbu@3dc5000 {
 			compatible = "qcom,qsmmuv500-tbu";
@@ -143,11 +143,10 @@
 				<GIC_SPI 407 IRQ_TYPE_LEVEL_HIGH>,
 				<GIC_SPI 408 IRQ_TYPE_LEVEL_HIGH>,
 				<GIC_SPI 409 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 412 IRQ_TYPE_LEVEL_HIGH>,
 				<GIC_SPI 418 IRQ_TYPE_LEVEL_HIGH>,
 				<GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH>,
-				<GIC_SPI 420 IRQ_TYPE_LEVEL_HIGH>,
 				<GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>,
-				<GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH>,
 				<GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH>,
 				<GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH>,
 				<GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH>,
@@ -158,7 +157,8 @@
 				<GIC_SPI 694 IRQ_TYPE_LEVEL_HIGH>,
 				<GIC_SPI 695 IRQ_TYPE_LEVEL_HIGH>,
 				<GIC_SPI 696 IRQ_TYPE_LEVEL_HIGH>,
-				<GIC_SPI 697 IRQ_TYPE_LEVEL_HIGH>;
+				<GIC_SPI 697 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 707 IRQ_TYPE_LEVEL_HIGH>;
 
 		anoc_1_tbu: anoc_1_tbu@15185000 {
 			compatible = "qcom,qsmmuv500-tbu";
diff --git a/arch/arm64/configs/vendor/kona-perf_defconfig b/arch/arm64/configs/vendor/kona-perf_defconfig
index 7fdaead..df6e8d7 100644
--- a/arch/arm64/configs/vendor/kona-perf_defconfig
+++ b/arch/arm64/configs/vendor/kona-perf_defconfig
@@ -411,13 +411,15 @@
 CONFIG_MSM_11AD=m
 CONFIG_USB_BAM=y
 CONFIG_QCOM_GENI_SE=y
+CONFIG_QCOM_CLK_RPMH=y
 CONFIG_SPMI_PMIC_CLKDIV=y
-CONFIG_MSM_GCC_KONA=y
+CONFIG_MSM_CLK_AOP_QMP=y
 CONFIG_MSM_VIDEOCC_KONA=y
 CONFIG_MSM_DISPCC_KONA=y
 CONFIG_MSM_CAMCC_KONA=y
 CONFIG_MSM_GPUCC_KONA=y
 CONFIG_MSM_DEBUGCC_KONA=y
+CONFIG_MSM_NPUCC_KONA=y
 CONFIG_HWSPINLOCK=y
 CONFIG_HWSPINLOCK_QCOM=y
 CONFIG_MAILBOX=y
@@ -444,6 +446,7 @@
 CONFIG_QCOM_RPMH=y
 CONFIG_QCOM_SMEM=y
 CONFIG_QCOM_EARLY_RANDOM=y
+CONFIG_QCOM_MEMORY_DUMP_V2=y
 CONFIG_QCOM_SMP2P=y
 CONFIG_SETUP_SSR_NOTIF_TIMEOUTS=y
 CONFIG_SSR_SYSMON_NOTIF_TIMEOUT=20000
diff --git a/arch/arm64/configs/vendor/kona_defconfig b/arch/arm64/configs/vendor/kona_defconfig
index 7ddbb3a..85b5f02 100644
--- a/arch/arm64/configs/vendor/kona_defconfig
+++ b/arch/arm64/configs/vendor/kona_defconfig
@@ -425,13 +425,15 @@
 CONFIG_MSM_11AD=m
 CONFIG_USB_BAM=y
 CONFIG_QCOM_GENI_SE=y
+CONFIG_QCOM_CLK_RPMH=y
 CONFIG_SPMI_PMIC_CLKDIV=y
-CONFIG_MSM_GCC_KONA=y
+CONFIG_MSM_CLK_AOP_QMP=y
 CONFIG_MSM_VIDEOCC_KONA=y
 CONFIG_MSM_DISPCC_KONA=y
 CONFIG_MSM_CAMCC_KONA=y
 CONFIG_MSM_GPUCC_KONA=y
 CONFIG_MSM_DEBUGCC_KONA=y
+CONFIG_MSM_NPUCC_KONA=y
 CONFIG_HWSPINLOCK=y
 CONFIG_HWSPINLOCK_QCOM=y
 CONFIG_MAILBOX=y
@@ -458,6 +460,7 @@
 CONFIG_QCOM_RPMH=y
 CONFIG_QCOM_SMEM=y
 CONFIG_QCOM_EARLY_RANDOM=y
+CONFIG_QCOM_MEMORY_DUMP_V2=y
 CONFIG_QCOM_SMP2P=y
 CONFIG_SETUP_SSR_NOTIF_TIMEOUTS=y
 CONFIG_SSR_SYSMON_NOTIF_TIMEOUT=20000
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index bb1f6c5..3bc6139 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -324,11 +324,13 @@
 static void __init update_memory_limit(void)
 {
 	unsigned long dt_root = of_get_flat_dt_root();
-	unsigned long node, mp;
-	const char *p;
+	unsigned long node;
 	unsigned long long ram_sz, sz;
 	phys_addr_t end_addr, addr_aligned, offset;
-	int ret;
+	int len;
+	const __be32 *prop;
+	phys_addr_t min_ddr_sz = 0, offline_sz = 0;
+	int t_len = (2 * dt_root_size_cells) * sizeof(__be32);
 
 	ram_sz = memblock_phys_mem_size();
 	node = of_get_flat_dt_subnode_by_name(dt_root, "mem-offline");
@@ -336,23 +338,46 @@
 		pr_err("mem-offine node not found in FDT\n");
 		return;
 	}
-	p = of_get_flat_dt_prop(node, "mem-percent", NULL);
-	if (!p) {
-		pr_err("mem-offine: mem-percent property not found in FDT\n");
+
+	prop = of_get_flat_dt_prop(node, "offline-sizes", &len);
+	if (prop) {
+		if (len % t_len != 0) {
+			pr_err("mem-offline: invalid offline-sizes property\n");
+			return;
+		}
+
+		while (len > 0) {
+			phys_addr_t tmp_min_ddr_sz = dt_mem_next_cell(
+							dt_root_addr_cells,
+							&prop);
+			phys_addr_t tmp_offline_sz = dt_mem_next_cell(
+							dt_root_size_cells,
+							&prop);
+
+			if (tmp_min_ddr_sz < ram_sz &&
+			    tmp_min_ddr_sz > min_ddr_sz) {
+				if (tmp_offline_sz < ram_sz) {
+					min_ddr_sz = tmp_min_ddr_sz;
+					offline_sz = tmp_offline_sz;
+				} else {
+					pr_info("mem-offline: invalid offline size:%pa\n",
+						 &tmp_offline_sz);
+				}
+			}
+			len -= t_len;
+		}
+	} else {
+		pr_err("mem-offline: offline-sizes property not found in DT\n");
 		return;
 	}
 
-	ret = kstrtoul(p, 10, &mp);
-	if (ret) {
-		pr_err("mem-offine: kstrtoul failed\n");
+	if (offline_sz == 0) {
+		pr_info("mem-offline: no memory to offline for DDR size:%llu\n",
+			ram_sz);
 		return;
 	}
 
-	if (mp > 100) {
-		pr_err("mem-offine: Invalid mem-percent DT property\n");
-		return;
-	}
-	sz = ram_sz - ((ram_sz * mp) / 100);
+	sz = ram_sz - offline_sz;
 	memory_limit = (phys_addr_t)sz;
 	end_addr = memblock_max_addr(memory_limit);
 	addr_aligned = ALIGN(end_addr, MIN_MEMORY_BLOCK_SIZE);
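
The loop in update_memory_limit() above selects, from the offline-sizes table,
the entry with the largest minimum-DDR value that is still below the system's
DDR size, and skips entries whose offlinable size is not smaller than system
DDR. A standalone sketch of the same rule, using hypothetical names and a plain
struct array instead of the flattened DT cells:

#include <stddef.h>
#include <stdint.h>

/*
 * Illustrative sketch of the selection rule in update_memory_limit();
 * the kernel code walks the flattened DT cells directly.
 */
struct offline_entry {
	uint64_t min_ddr_sz;	/* minimum DDR required, in bytes */
	uint64_t offline_sz;	/* offlinable region size, in bytes */
};

static uint64_t pick_offline_size(const struct offline_entry *tbl, size_t n,
				  uint64_t ram_sz)
{
	uint64_t best_min = 0, best_off = 0;
	size_t i;

	for (i = 0; i < n; i++) {
		/* Keep the entry with the largest threshold below ram_sz,
		 * ignoring entries whose offlinable size is not plausible.
		 */
		if (tbl[i].min_ddr_sz < ram_sz &&
		    tbl[i].min_ddr_sz > best_min &&
		    tbl[i].offline_sz < ram_sz) {
			best_min = tbl[i].min_ddr_sz;
			best_off = tbl[i].offline_sz;
		}
	}
	return best_off;	/* 0 means no offlinable region */
}

With the offlinable size chosen, the hunk above sets memory_limit to
ram_sz - offline_sz and aligns the resulting end address to
MIN_MEMORY_BLOCK_SIZE.
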
diff --git a/drivers/Kconfig b/drivers/Kconfig
index da7406c..7e8029c 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -137,6 +137,8 @@
 
 source "drivers/xen/Kconfig"
 
+source "drivers/vservices/Kconfig"
+
 source "drivers/staging/Kconfig"
 
 source "drivers/platform/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 7baf297..abf600a 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -11,6 +11,8 @@
 
 obj-$(CONFIG_GENERIC_PHY)	+= phy/
 
+obj-$(CONFIG_VSERVICES_SUPPORT)	+= vservices/
+
 # GPIO must come after pinctrl as gpios may need to mux pins etc
 obj-$(CONFIG_PINCTRL)		+= pinctrl/
 obj-$(CONFIG_GPIOLIB)		+= gpio/
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 0910d87..a2e59a9 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -487,7 +487,7 @@
 		&NODE_DATA(numa_node_id())->node_zones[ZONE_MOVABLE];
 	unsigned long used, block_sz = get_memory_block_size();
 
-	if (mem->state != MEM_ONLINE)
+	if (!populated_zone(movable_zone) || mem->state != MEM_ONLINE)
 		return snprintf(buf, 100, "0\n");
 
 	block_id = base_memory_block_id(mem->start_section_nr);
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 175eb6d..a03deaa 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -578,3 +578,50 @@
 	has not installed a hidden back door to compromise the CPU's
 	random number generation facilities. This can also be configured
 	at boot with "random.trust_cpu=on/off".
+
+config OKL4_PIPE
+	bool "OKL4 Pipe Driver"
+	depends on OKL4_GUEST
+	default n
+	help
+	  Virtual pipe driver for the OKL4 Microvisor. This driver allows
+	  OKL4 Microvisor pipes to be exposed directly to user level as
+	  character devices.
+
+config VSERVICES_SERIAL
+	tristate
+
+config VSERVICES_SERIAL_SERVER
+	tristate "Virtual Services serial server"
+	depends on VSERVICES_SUPPORT && VSERVICES_SERVER
+	select VSERVICES_SERIAL
+	select VSERVICES_PROTOCOL_SERIAL_SERVER
+	default y
+	help
+	  Select this option if you want support for server side Virtual
+	  Services serial. A virtual serial service behaves similarly to
+	  a UNIX pseudo terminal (pty), and does not require any physical
+	  serial hardware. Virtual serial devices are typically called
+	  /dev/ttyVS0, /dev/ttyVS1, etc.
+
+config VSERVICES_SERIAL_CLIENT
+	tristate "Virtual Services serial client"
+	depends on VSERVICES_SUPPORT && VSERVICES_CLIENT
+	select VSERVICES_SERIAL
+	select VSERVICES_PROTOCOL_SERIAL_CLIENT
+	default y
+	help
+	  Select this option if you want support for client side Virtual
+	  Services serial. A virtual serial service behaves similarly to
+	  a UNIX pseudo terminal (pty), and does not require any physical
+	  serial hardware. Virtual serial devices are typically called
+	  /dev/ttyVS0, /dev/ttyVS1, etc.
+
+config VSERVICES_VTTY_COUNT
+	int "Maximum number of Virtual Services serial devices"
+	depends on VSERVICES_SERIAL
+	range 0 256
+	default "8"
+	help
+	  The maximum number of Virtual Services serial devices to support.
+	  This limit applies to both the client and server.
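
As the help text above notes, the virtual serial services show up as ordinary
tty devices (/dev/ttyVS0, /dev/ttyVS1, ...). A hypothetical user-space sketch,
assuming /dev/ttyVS0 exists on the target:

#include <fcntl.h>
#include <stdio.h>
#include <termios.h>
#include <unistd.h>

int main(void)
{
	struct termios tio;
	char buf[128];
	ssize_t n;
	int fd = open("/dev/ttyVS0", O_RDWR | O_NOCTTY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Use the virtual serial port like any other tty, in raw mode. */
	tcgetattr(fd, &tio);
	cfmakeraw(&tio);
	tcsetattr(fd, TCSANOW, &tio);

	if (write(fd, "hello\n", 6) < 0)
		perror("write");

	n = read(fd, buf, sizeof(buf));
	if (n > 0)
		printf("got %zd bytes\n", n);

	close(fd);
	return 0;
}
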
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 58787fd..d9e4dfe 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -63,3 +63,11 @@
 endif
 obj-$(CONFIG_ADI)		+= adi.o
 obj-$(CONFIG_DIAG_CHAR)		+= diag/
+obj-$(CONFIG_OKL4_PIPE)		+= okl4_pipe.o
+CFLAGS_okl4_pipe.o			+= -Werror
+obj-$(CONFIG_VSERVICES_SERIAL)		+= vservices_serial.o
+CFLAGS_vservices_serial.o	+= -Werror
+obj-$(CONFIG_VSERVICES_SERIAL_CLIENT)	+= vs_serial_client.o
+CFLAGS_vs_serial_client.o	 += -Werror
+obj-$(CONFIG_VSERVICES_SERIAL_SERVER)	+= vs_serial_server.o
+CFLAGS_vs_serial_server.o	+= -Werror
diff --git a/drivers/char/okl4_pipe.c b/drivers/char/okl4_pipe.c
new file mode 100644
index 0000000..ccb87dd
--- /dev/null
+++ b/drivers/char/okl4_pipe.c
@@ -0,0 +1,676 @@
+/*
+ * drivers/char/okl4_pipe.c
+ *
+ * Copyright (c) 2015 General Dynamics
+ * Copyright (c) 2015 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * OKL4 Microvisor Pipes driver.
+ *
+ * Clients using this driver must have vclient names of the form
+ * "pipe%d", where %d is the pipe number, which must be
+ * unique and less than MAX_PIPES.
+ */
+
+/* #define DEBUG 1 */
+/* #define VERBOSE_DEBUG 1 */
+
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/of.h>
+#include <asm/uaccess.h>
+#include <asm-generic/okl4_virq.h>
+
+#include <microvisor/microvisor.h>
+#if defined(CONFIG_OKL4_VIRTUALISATION)
+#include <asm/okl4-microvisor/okl4tags.h>
+#include <asm/okl4-microvisor/microvisor_bus.h>
+#include <asm/okl4-microvisor/virq.h>
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)
+#define __devinit
+#define __devexit
+#define __devexit_p(x) x
+#endif
+
+#define DRIVER_NAME "okl4-pipe"
+#define DEVICE_NAME "okl4-pipe"
+
+#ifndef CONFIG_OF
+#error "okl4-pipe driver only supported on device tree kernels"
+#endif
+
+#define MAX_PIPES 8
+
+#ifdef CONFIG_OKL4_INTERLEAVED_PRIORITIES
+extern int vcpu_prio_normal;
+#endif
+
+static int okl4_pipe_major;
+static struct class *okl4_pipe_class;
+
+/* This can be extended if required */
+struct okl4_pipe_mv {
+	int pipe_id;
+};
+
+struct okl4_pipe {
+	struct okl4_pipe_data_buffer *write_buf;
+	okl4_kcap_t pipe_tx_kcap;
+	okl4_kcap_t pipe_rx_kcap;
+	int tx_irq;
+	int rx_irq;
+	size_t max_msg_size;
+	int ref_count;
+	struct mutex pipe_mutex;
+	spinlock_t pipe_lock;
+
+	struct platform_device *pdev;
+	struct cdev cdev;
+
+	bool reset;
+	bool tx_maybe_avail;
+	bool rx_maybe_avail;
+
+	wait_queue_head_t rx_wait_q;
+	wait_queue_head_t tx_wait_q;
+	wait_queue_head_t poll_wait_q;
+
+	char *rx_buf;
+	size_t rx_buf_count;
+};
+static struct okl4_pipe pipes[MAX_PIPES];
+
+static okl4_error_t
+okl4_pipe_control(okl4_kcap_t kcap, uint8_t control)
+{
+	okl4_pipe_control_t x = 0;
+
+	okl4_pipe_control_setdoop(&x, true);
+	okl4_pipe_control_setoperation(&x, control);
+	return _okl4_sys_pipe_control(kcap, x);
+}
+
+static irqreturn_t
+okl4_pipe_tx_irq(int irq, void *dev)
+{
+	struct okl4_pipe *pipe = dev;
+	okl4_pipe_state_t payload = okl4_get_virq_payload(irq);
+
+	spin_lock(&pipe->pipe_lock);
+	if (okl4_pipe_state_gettxavailable(&payload))
+		pipe->tx_maybe_avail = true;
+	if (okl4_pipe_state_getreset(&payload)) {
+		pipe->reset = true;
+		pipe->tx_maybe_avail = true;
+	}
+	spin_unlock(&pipe->pipe_lock);
+
+	wake_up_interruptible(&pipe->tx_wait_q);
+	wake_up_interruptible(&pipe->poll_wait_q);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t
+okl4_pipe_rx_irq(int irq, void *dev)
+{
+	struct okl4_pipe *pipe = dev;
+	okl4_pipe_state_t payload = okl4_get_virq_payload(irq);
+
+	spin_lock(&pipe->pipe_lock);
+	if (okl4_pipe_state_getrxavailable(&payload))
+		pipe->rx_maybe_avail = true;
+	if (okl4_pipe_state_getreset(&payload)) {
+		pipe->reset = true;
+		pipe->rx_maybe_avail = true;
+	}
+	spin_unlock(&pipe->pipe_lock);
+
+	wake_up_interruptible(&pipe->rx_wait_q);
+	wake_up_interruptible(&pipe->poll_wait_q);
+
+	return IRQ_HANDLED;
+}
+
+static ssize_t
+okl4_pipe_read(struct file *filp, char __user *buf, size_t count,
+		loff_t *f_pos)
+{
+	struct okl4_pipe_mv *priv = filp->private_data;
+	int pipe_id = priv->pipe_id;
+	struct okl4_pipe *pipe = &pipes[pipe_id];
+	struct _okl4_sys_pipe_recv_return recv_return;
+	uint32_t *buffer = NULL;
+	size_t recv = 0;
+
+	if (!count)
+		return 0;
+
+again:
+	if (pipe->reset)
+		return -EPIPE;
+
+	if (!pipe->rx_maybe_avail && (filp->f_flags & O_NONBLOCK))
+		return -EAGAIN;
+
+	if (wait_event_interruptible(pipe->rx_wait_q, pipe->rx_maybe_avail))
+		return -ERESTARTSYS;
+
+	if (mutex_lock_interruptible(&pipe->pipe_mutex))
+		return -ERESTARTSYS;
+
+	/* Receive buffered data first */
+	if (pipe->rx_buf_count) {
+		recv = min(pipe->rx_buf_count, count);
+
+		if (copy_to_user(buf, pipe->rx_buf, recv)) {
+			mutex_unlock(&pipe->pipe_mutex);
+			return -EFAULT;
+		}
+
+		pipe->rx_buf_count -= recv;
+
+		if (pipe->rx_buf_count) {
+			memmove(pipe->rx_buf, pipe->rx_buf + recv,
+				pipe->max_msg_size - recv);
+		}
+
+		buf += recv;
+		count -= recv;
+		if (!count) {
+			mutex_unlock(&pipe->pipe_mutex);
+			return recv;
+		}
+	}
+
+	buffer = kmalloc(pipe->max_msg_size + sizeof(uint32_t), GFP_KERNEL);
+
+	if (!buffer) {
+		mutex_unlock(&pipe->pipe_mutex);
+		return -ENOMEM;
+	}
+
+	while (count) {
+		okl4_error_t ret;
+		size_t size;
+
+		spin_lock_irq(&pipe->pipe_lock);
+		recv_return = _okl4_sys_pipe_recv(pipe->pipe_rx_kcap,
+				pipe->max_msg_size + sizeof(uint32_t),
+				(void *)buffer);
+		ret = recv_return.error;
+
+		if (ret == OKL4_ERROR_PIPE_NOT_READY ||
+				ret == OKL4_ERROR_PIPE_EMPTY) {
+			pipe->rx_maybe_avail = false;
+			if (!recv) {
+				if (!(filp->f_flags & O_NONBLOCK)) {
+					spin_unlock_irq(&pipe->pipe_lock);
+					mutex_unlock(&pipe->pipe_mutex);
+					kfree(buffer);
+					goto again;
+				}
+				recv = -EAGAIN;
+			}
+			goto error;
+		} else if (ret != OKL4_OK) {
+			dev_err(&pipe->pdev->dev,
+					"pipe recv returned error %d in okl4_pipe driver!\n",
+					(int)ret);
+			if (!recv)
+				recv = -ENXIO;
+			goto error;
+		}
+
+		spin_unlock_irq(&pipe->pipe_lock);
+
+		size = buffer[0];
+		if (size > pipe->max_msg_size) {
+			/* pipe error */
+			if (!recv)
+				recv = -EPROTO;
+			goto out;
+		}
+
+		/* Save extra received data */
+		if (size > count) {
+			pipe->rx_buf_count = size - count;
+			memcpy(pipe->rx_buf, (char*)&buffer[1] + count,
+					size - count);
+			size = count;
+		}
+
+		if (copy_to_user(buf, &buffer[1], size)) {
+			if (!recv)
+				recv = -EFAULT;
+			goto out;
+		}
+
+
+		count -= size;
+		buf += size;
+		recv += size;
+	}
+out:
+	mutex_unlock(&pipe->pipe_mutex);
+
+	kfree(buffer);
+	return recv;
+error:
+	spin_unlock_irq(&pipe->pipe_lock);
+	goto out;
+}
+
+static ssize_t
+okl4_pipe_write(struct file *filp, const char __user *buf, size_t count,
+		loff_t *f_pos)
+{
+	struct okl4_pipe_mv *priv = filp->private_data;
+	int pipe_id = priv->pipe_id;
+	struct okl4_pipe *pipe = &pipes[pipe_id];
+	uint32_t *buffer = NULL;
+	size_t sent = 0;
+
+	if (!count)
+		return 0;
+
+again:
+	if (pipe->reset)
+		return -EPIPE;
+
+	if (!pipe->tx_maybe_avail && (filp->f_flags & O_NONBLOCK))
+		return -EAGAIN;
+
+	if (wait_event_interruptible(pipe->tx_wait_q, pipe->tx_maybe_avail))
+		return -ERESTARTSYS;
+
+	if (mutex_lock_interruptible(&pipe->pipe_mutex))
+		return -ERESTARTSYS;
+
+	buffer = kmalloc(pipe->max_msg_size + sizeof(uint32_t), GFP_KERNEL);
+
+	if (!buffer) {
+		mutex_unlock(&pipe->pipe_mutex);
+		return -ENOMEM;
+	}
+
+	while (count) {
+		okl4_error_t ret;
+		size_t size = min(count, pipe->max_msg_size);
+		size_t pipe_size = roundup(size + sizeof(uint32_t),
+				sizeof(uint32_t));
+
+		if (copy_from_user(&buffer[1], buf, size)) {
+			if (!sent)
+				sent = -EFAULT;
+			break;
+		}
+
+		buffer[0] = size;
+
+		spin_lock_irq(&pipe->pipe_lock);
+		ret = _okl4_sys_pipe_send(pipe->pipe_tx_kcap, pipe_size,
+				(void *)buffer);
+		if (ret == OKL4_ERROR_PIPE_NOT_READY ||
+				ret == OKL4_ERROR_PIPE_FULL) {
+			pipe->tx_maybe_avail = false;
+			spin_unlock_irq(&pipe->pipe_lock);
+			if (!sent) {
+				if (filp->f_flags & O_NONBLOCK) {
+					sent = -EAGAIN;
+					break;
+				}
+				mutex_unlock(&pipe->pipe_mutex);
+				kfree(buffer);
+				goto again;
+			}
+			break;
+		} else if (ret != OKL4_OK) {
+			dev_err(&pipe->pdev->dev,
+					"pipe send returned error %d in okl4_pipe driver!\n",
+					(int)ret);
+			if (!sent)
+				sent = -ENXIO;
+			spin_unlock_irq(&pipe->pipe_lock);
+			break;
+		}
+		spin_unlock_irq(&pipe->pipe_lock);
+
+		count -= size;
+		buf += size;
+		sent += size;
+	}
+	mutex_unlock(&pipe->pipe_mutex);
+
+	kfree(buffer);
+	return sent;
+}
+
+
+static unsigned int
+okl4_pipe_poll(struct file *filp, struct poll_table_struct *poll_table)
+{
+	struct okl4_pipe_mv *priv = filp->private_data;
+	int pipe_id = priv->pipe_id;
+	struct okl4_pipe *pipe = &pipes[pipe_id];
+	unsigned int ret = 0;
+
+	poll_wait(filp, &pipe->poll_wait_q, poll_table);
+
+	spin_lock_irq(&pipe->pipe_lock);
+
+	if (pipe->rx_maybe_avail)
+		ret |= POLLIN | POLLRDNORM;
+	if (pipe->tx_maybe_avail)
+		ret |= POLLOUT | POLLWRNORM;
+	if (pipe->reset)
+		ret = POLLHUP;
+
+	spin_unlock_irq(&pipe->pipe_lock);
+
+	return ret;
+}
+
+static int
+okl4_pipe_open(struct inode *inode, struct file *filp)
+{
+	struct okl4_pipe *pipe = container_of(inode->i_cdev,
+			struct okl4_pipe, cdev);
+	struct okl4_pipe_mv *priv = dev_get_drvdata(&pipe->pdev->dev);
+
+	filp->private_data = priv;
+	if (!pipe->ref_count) {
+		pipe->rx_buf = kmalloc(pipe->max_msg_size, GFP_KERNEL);
+		if (!pipe->rx_buf)
+			return -ENOMEM;
+
+		mutex_init(&pipe->pipe_mutex);
+		spin_lock_init(&pipe->pipe_lock);
+
+		pipe->rx_buf_count = 0;
+		pipe->reset = false;
+		pipe->tx_maybe_avail = true;
+		pipe->rx_maybe_avail = true;
+
+		okl4_pipe_control(pipe->pipe_tx_kcap,
+				OKL4_PIPE_CONTROL_OP_SET_TX_READY);
+		okl4_pipe_control(pipe->pipe_rx_kcap,
+				OKL4_PIPE_CONTROL_OP_SET_RX_READY);
+	}
+	pipe->ref_count++;
+	return 0;
+}
+
+static int
+okl4_pipe_close(struct inode *inode, struct file *filp)
+{
+	struct okl4_pipe *pipe = container_of(inode->i_cdev,
+			struct okl4_pipe, cdev);
+
+	pipe->ref_count--;
+	if (!pipe->ref_count) {
+		okl4_pipe_control(pipe->pipe_rx_kcap,
+				OKL4_PIPE_CONTROL_OP_RESET);
+		okl4_pipe_control(pipe->pipe_tx_kcap,
+				OKL4_PIPE_CONTROL_OP_RESET);
+
+		kfree(pipe->rx_buf);
+		pipe->rx_buf = NULL;
+		pipe->rx_buf_count = 0;
+	}
+
+	return 0;
+}
+
+struct file_operations okl4_pipe_fops = {
+	.owner =	THIS_MODULE,
+	.read =		okl4_pipe_read,
+	.write =	okl4_pipe_write,
+	.open =		okl4_pipe_open,
+	.release =	okl4_pipe_close,
+	.poll =		okl4_pipe_poll,
+};
+
+static int __devinit
+okl4_pipe_probe(struct platform_device *pdev)
+{
+	struct okl4_pipe *pipe;
+	int err, pipe_id;
+	struct okl4_pipe_mv *priv;
+	dev_t dev_num;
+	struct device *device = NULL;
+	u32 reg[2];
+	struct resource *irq;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(struct okl4_pipe_mv),
+			GFP_KERNEL);
+	if (priv == NULL) {
+		err = -ENOMEM;
+		goto fail_alloc_priv;
+	}
+
+	dev_set_drvdata(&pdev->dev, priv);
+
+	pipe_id = of_alias_get_id(pdev->dev.of_node, "pipe");
+	if (pipe_id < 0 || pipe_id >= MAX_PIPES) {
+		err = -ENXIO;
+		goto fail_pipe_id;
+	}
+
+	if (of_property_read_u32_array(pdev->dev.of_node, "reg", reg, 2)) {
+		dev_err(&pdev->dev, "need 2 reg resources\n");
+		err = -ENODEV;
+		goto fail_pipe_id;
+	}
+
+	/* Populate the private structure */
+	priv->pipe_id = pipe_id;
+
+	pipe = &pipes[pipe_id];
+
+	/* Set up and register the pipe device */
+	pipe->pdev = pdev;
+	dev_set_name(&pdev->dev, "%s%d", DEVICE_NAME, (int)pipe_id);
+
+	pipe->ref_count = 0;
+	pipe->pipe_tx_kcap = reg[0];
+	pipe->pipe_rx_kcap = reg[1];
+	pipe->max_msg_size = 64;
+
+	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!irq) {
+		dev_err(&pdev->dev, "no tx irq resource?\n");
+		err = -ENODEV;
+		goto fail_irq_resource;
+	}
+	pipe->tx_irq = irq->start;
+	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+	if (!irq) {
+		dev_err(&pdev->dev, "no rx irq resource?\n");
+		err = -ENODEV;
+		goto fail_irq_resource;
+	}
+	pipe->rx_irq = irq->start;
+
+	pipe->write_buf = kmalloc(sizeof(struct okl4_pipe_data_buffer *),
+                                                        GFP_KERNEL);
+	if (!pipe->write_buf) {
+		dev_err(&pdev->dev, "cannot allocate write buffer\n");
+		err = -ENOMEM;
+		goto fail_malloc_write;
+	}
+
+	init_waitqueue_head(&pipe->rx_wait_q);
+	init_waitqueue_head(&pipe->tx_wait_q);
+	init_waitqueue_head(&pipe->poll_wait_q);
+
+	err = devm_request_irq(&pdev->dev, pipe->rx_irq,
+			okl4_pipe_rx_irq, 0, dev_name(&pdev->dev),
+			pipe);
+	if (err) {
+		dev_err(&pdev->dev, "cannot register rx irq %d: %d\n",
+				(int)pipe->rx_irq, (int)err);
+		goto fail_request_rx_irq;
+	}
+
+	err = devm_request_irq(&pdev->dev, pipe->tx_irq,
+			okl4_pipe_tx_irq, 0, dev_name(&pdev->dev),
+			pipe);
+	if (err) {
+		dev_err(&pdev->dev, "cannot register tx irq %d: %d\n",
+				(int)pipe->tx_irq, (int)err);
+		goto fail_request_tx_irq;
+	}
+
+	dev_num = MKDEV(okl4_pipe_major, pipe_id);
+
+	cdev_init(&pipe->cdev, &okl4_pipe_fops);
+	pipe->cdev.owner = THIS_MODULE;
+	err = cdev_add(&pipe->cdev, dev_num, 1);
+	if (err) {
+		dev_err(&pdev->dev, "cannot add device: %d\n", (int)err);
+		goto fail_cdev_add;
+	}
+
+	device = device_create(okl4_pipe_class, NULL, dev_num, NULL,
+			DEVICE_NAME "%d", pipe_id);
+	if (IS_ERR(device)) {
+		err = PTR_ERR(device);
+		dev_err(&pdev->dev, "cannot create device: %d\n", (int)err);
+		goto fail_device_create;
+	}
+
+	return 0;
+
+fail_device_create:
+	cdev_del(&pipe->cdev);
+fail_cdev_add:
+	devm_free_irq(&pdev->dev, pipe->tx_irq, pipe);
+fail_request_tx_irq:
+	devm_free_irq(&pdev->dev, pipe->rx_irq, pipe);
+fail_request_rx_irq:
+	kfree(pipe->write_buf);
+fail_malloc_write:
+fail_irq_resource:
+fail_pipe_id:
+	dev_set_drvdata(&pdev->dev, NULL);
+	devm_kfree(&pdev->dev, priv);
+fail_alloc_priv:
+	return err;
+}
+
+static int __devexit
+okl4_pipe_remove(struct platform_device *pdev)
+{
+	struct okl4_pipe *pipe;
+	struct okl4_pipe_mv *priv = dev_get_drvdata(&pdev->dev);
+
+	if (priv->pipe_id < 0 || priv->pipe_id >= MAX_PIPES)
+		return -ENXIO;
+
+	pipe = &pipes[priv->pipe_id];
+
+	cdev_del(&pipe->cdev);
+
+	devm_free_irq(&pdev->dev, pipe->tx_irq, pipe);
+	devm_free_irq(&pdev->dev, pipe->rx_irq, pipe);
+
+	kfree(pipe->write_buf);
+
+	dev_set_drvdata(&pdev->dev, NULL);
+	devm_kfree(&pdev->dev, priv);
+
+	return 0;
+}
+
+static const struct of_device_id okl4_pipe_match[] = {
+	{
+		.compatible = "okl,pipe",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, okl4_pipe_match);
+
+static struct platform_driver okl4_pipe_driver = {
+	.probe		= okl4_pipe_probe,
+	.remove		= __devexit_p(okl4_pipe_remove),
+	.driver = {
+		.name = DRIVER_NAME,
+		.of_match_table = okl4_pipe_match,
+	},
+};
+
+static int __init
+okl4_pipe_init(void)
+{
+	int err;
+	dev_t dev_num = 0;
+
+	err = alloc_chrdev_region(&dev_num, 0, MAX_PIPES, DEVICE_NAME);
+	if (err < 0) {
+		pr_err("%s: cannot allocate device region\n", __func__);
+		goto fail_alloc_chrdev_region;
+	}
+	okl4_pipe_major = MAJOR(dev_num);
+
+	okl4_pipe_class = class_create(THIS_MODULE, DEVICE_NAME);
+	if (IS_ERR(okl4_pipe_class)) {
+		err = PTR_ERR(okl4_pipe_class);
+		goto fail_class_create;
+	}
+
+	/* Register the driver with the microvisor bus */
+	err = platform_driver_register(&okl4_pipe_driver);
+	if (err)
+		goto fail_driver_register;
+
+	return 0;
+
+fail_driver_register:
+	class_destroy(okl4_pipe_class);
+fail_class_create:
+	unregister_chrdev_region(dev_num, MAX_PIPES);
+fail_alloc_chrdev_region:
+	return err;
+}
+
+static void __exit
+okl4_pipe_exit(void)
+{
+	dev_t dev_num = MKDEV(okl4_pipe_major, 0);
+
+	platform_driver_unregister(&okl4_pipe_driver);
+	class_destroy(okl4_pipe_class);
+	unregister_chrdev_region(dev_num, MAX_PIPES);
+}
+
+module_init(okl4_pipe_init);
+module_exit(okl4_pipe_exit);
+
+MODULE_DESCRIPTION("OKL4 pipe driver");
+MODULE_AUTHOR("John Clarke <johnc@cog.systems>");
diff --git a/drivers/char/vs_serial_client.c b/drivers/char/vs_serial_client.c
new file mode 100644
index 0000000..a0bf1cc
--- /dev/null
+++ b/drivers/char/vs_serial_client.c
@@ -0,0 +1,132 @@
+/*
+ * drivers/char/vs_serial_client.c
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Serial vService client driver
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+
+#include <vservices/transport.h>
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/service.h>
+
+#include <vservices/protocol/serial/common.h>
+#include <vservices/protocol/serial/types.h>
+#include <vservices/protocol/serial/client.h>
+
+#include "vs_serial_common.h"
+
+#define client_state_to_port(state) \
+	container_of(state, struct vtty_port, u.vs_client)
+
+static struct vs_mbuf *vs_serial_client_alloc_msg_buf(struct vtty_port *port,
+		struct vs_pbuf *pbuf, gfp_t gfp_flags)
+{
+	return vs_client_serial_serial_alloc_msg(&port->u.vs_client, pbuf,
+			gfp_flags);
+}
+
+static void vs_serial_client_free_msg_buf(struct vtty_port *port,
+		struct vs_mbuf *mbuf, struct vs_pbuf *pbuf)
+{
+	vs_client_serial_serial_free_msg(&port->u.vs_client, pbuf, mbuf);
+}
+
+static int vs_serial_client_send_msg_buf(struct vtty_port *port,
+		struct vs_mbuf *mbuf, struct vs_pbuf *pbuf)
+{
+	return vs_client_serial_serial_send_msg(&port->u.vs_client, *pbuf,
+			mbuf);
+}
+
+static bool vs_serial_client_is_vservices_running(struct vtty_port *port)
+{
+	return VSERVICE_BASE_STATE_IS_RUNNING(port->u.vs_client.state.base);
+}
+
+static struct vtty_port_ops client_port_ops = {
+	.alloc_msg_buf	= vs_serial_client_alloc_msg_buf,
+	.free_msg_buf	= vs_serial_client_free_msg_buf,
+	.send_msg_buf	= vs_serial_client_send_msg_buf,
+	.is_running	= vs_serial_client_is_vservices_running,
+};
+
+static struct vs_client_serial_state *
+vs_serial_client_alloc(struct vs_service_device *service)
+{
+	struct vtty_port *port;
+
+	port = vs_serial_alloc_port(service, &client_port_ops);
+	if (!port)
+		return NULL;
+
+	dev_set_drvdata(&service->dev, port);
+	return &port->u.vs_client;
+}
+
+static void vs_serial_client_release(struct vs_client_serial_state *_state)
+{
+	vs_serial_release(client_state_to_port(_state));
+}
+
+static void vs_serial_client_closed(struct vs_client_serial_state *_state)
+{
+	vs_serial_reset(client_state_to_port(_state));
+}
+
+static void vs_serial_client_opened(struct vs_client_serial_state *_state)
+{
+	struct vtty_port *port = client_state_to_port(_state);
+
+	dev_dbg(&port->service->dev, "ack_open\n");
+	port->max_transfer_size = _state->packet_size;
+}
+
+static int
+vs_serial_client_handle_message(struct vs_client_serial_state *_state,
+		struct vs_pbuf data, struct vs_mbuf *mbuf)
+{
+	return vs_serial_handle_message(client_state_to_port(_state), mbuf,
+			&data);
+}
+
+static struct vs_client_serial vs_client_serial_driver = {
+	.rx_atomic		= true,
+	.alloc			= vs_serial_client_alloc,
+	.release		= vs_serial_client_release,
+	.closed			= vs_serial_client_closed,
+	.opened			= vs_serial_client_opened,
+	.serial = {
+		.msg_msg	= vs_serial_client_handle_message,
+	},
+};
+
+static int __init vs_serial_client_init(void)
+{
+	return vservice_serial_client_register(&vs_client_serial_driver,
+			"vserial");
+}
+
+static void __exit vs_serial_client_exit(void)
+{
+	vservice_serial_client_unregister(&vs_client_serial_driver);
+}
+
+module_init(vs_serial_client_init);
+module_exit(vs_serial_client_exit);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Serial Client Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/char/vs_serial_common.h b/drivers/char/vs_serial_common.h
new file mode 100644
index 0000000..2fe7d28
--- /dev/null
+++ b/drivers/char/vs_serial_common.h
@@ -0,0 +1,91 @@
+/*
+ * drivers/char/vs_serial_common.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef _VS_SERIAL_COMMON_H
+#define _VS_SERIAL_COMMON_H
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
+#include <linux/console.h>
+
+#include <vservices/protocol/serial/common.h>
+#include <vservices/protocol/serial/types.h>
+#include <vservices/protocol/serial/server.h>
+#include <vservices/protocol/serial/client.h>
+
+#define OUTBUFFER_SIZE 1024
+#define vtty_list_last_entry(ptr, type, member) \
+	list_entry((ptr)->prev, type, member)
+
+struct vtty_port;
+struct vs_service_device;
+
+struct vtty_port_ops {
+	struct vs_mbuf	*(*alloc_msg_buf)(struct vtty_port *port,
+			struct vs_pbuf *pbuf, gfp_t gfp_flags);
+	void		(*free_msg_buf)(struct vtty_port *port,
+			struct vs_mbuf *mbuf, struct vs_pbuf *pbuf);
+	int		(*send_msg_buf)(struct vtty_port *port,
+			struct vs_mbuf *mbuf, struct vs_pbuf *pbuf);
+	bool		(*is_running)(struct vtty_port *port);
+};
+
+struct vtty_port {
+	union {
+		struct vs_client_serial_state vs_client;
+		struct vs_server_serial_state vs_server;
+	} u;
+
+	struct vs_service_device	*service;
+	int				port_num;
+
+	struct tty_driver		*vtty_driver;
+
+	struct vtty_port_ops		ops;
+
+	/* output data */
+	bool				doing_release;
+
+	int				max_transfer_size;
+
+	/* Tracks if tty layer can receive data from driver */
+	bool				tty_canrecv;
+
+	/*
+	 * List of pending incoming buffers from the vServices stack. If we
+	 * receive a buffer, but cannot write it to the tty layer then we
+	 * queue it on this list to handle later. in_lock protects access to
+	 * the pending_in_packets list and the tty_canrecv field.
+	 */
+	struct list_head		pending_in_packets;
+	spinlock_t			in_lock;
+
+#ifdef CONFIG_OKL4_VTTY_CONSOLE
+	struct console			console;
+#endif
+
+	struct tty_port			port;
+};
+
+extern struct vtty_port *
+vs_serial_alloc_port(struct vs_service_device *service,
+	struct vtty_port_ops *port_ops);
+extern void vs_serial_release(struct vtty_port *port);
+extern void vs_serial_reset(struct vtty_port *port);
+extern int vs_serial_handle_message(struct vtty_port *port,
+		struct vs_mbuf *mbuf, struct vs_pbuf *pbuf);
+
+#endif /* _VS_SERIAL_COMMON_H */
diff --git a/drivers/char/vs_serial_server.c b/drivers/char/vs_serial_server.c
new file mode 100644
index 0000000..d4a169e
--- /dev/null
+++ b/drivers/char/vs_serial_server.c
@@ -0,0 +1,152 @@
+/*
+ * drivers/char/vs_serial_server.c
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Serial vService server driver
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+
+#include <vservices/transport.h>
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/service.h>
+
+#include <vservices/protocol/serial/common.h>
+#include <vservices/protocol/serial/types.h>
+#include <vservices/protocol/serial/server.h>
+
+#include "vs_serial_common.h"
+
+#define server_state_to_port(state) \
+	container_of(state, struct vtty_port, u.vs_server)
+
+static struct vs_mbuf *vs_serial_server_alloc_msg_buf(struct vtty_port *port,
+		struct vs_pbuf *pbuf, gfp_t gfp_flags)
+{
+	return vs_server_serial_serial_alloc_msg(&port->u.vs_server, pbuf,
+			gfp_flags);
+}
+
+static void vs_serial_server_free_msg_buf(struct vtty_port *port,
+		struct vs_mbuf *mbuf, struct vs_pbuf *pbuf)
+{
+	vs_server_serial_serial_free_msg(&port->u.vs_server, pbuf, mbuf);
+}
+
+static int vs_serial_server_send_msg_buf(struct vtty_port *port,
+		struct vs_mbuf *mbuf, struct vs_pbuf *pbuf)
+{
+	return vs_server_serial_serial_send_msg(&port->u.vs_server, *pbuf, mbuf);
+}
+
+static bool vs_serial_server_is_vservices_running(struct vtty_port *port)
+{
+	return VSERVICE_BASE_STATE_IS_RUNNING(port->u.vs_server.state.base);
+}
+
+static struct vtty_port_ops server_port_ops = {
+	.alloc_msg_buf	= vs_serial_server_alloc_msg_buf,
+	.free_msg_buf	= vs_serial_server_free_msg_buf,
+	.send_msg_buf	= vs_serial_server_send_msg_buf,
+	.is_running	= vs_serial_server_is_vservices_running,
+};
+
+static struct vs_server_serial_state *
+vs_serial_server_alloc(struct vs_service_device *service)
+{
+	struct vtty_port *port;
+
+	port = vs_serial_alloc_port(service, &server_port_ops);
+	if (!port)
+		return NULL;
+
+	dev_set_drvdata(&service->dev, port);
+	return &port->u.vs_server;
+}
+
+static void vs_serial_server_release(struct vs_server_serial_state *_state)
+{
+	vs_serial_release(server_state_to_port(_state));
+}
+
+static void vs_serial_server_closed(struct vs_server_serial_state *_state)
+{
+	vs_serial_reset(server_state_to_port(_state));
+}
+
+static int
+vs_serial_server_handle_message(struct vs_server_serial_state *_state,
+		struct vs_pbuf data, struct vs_mbuf *mbuf)
+{
+	return vs_serial_handle_message(server_state_to_port(_state), mbuf,
+			&data);
+}
+
+static vs_server_response_type_t
+vs_serial_server_req_open(struct vs_server_serial_state *_state)
+{
+	struct vtty_port *port = server_state_to_port(_state);
+
+	dev_dbg(&port->service->dev, "req_open\n");
+
+	/* FIXME: Jira ticket SDK-3521 - ryanm. */
+	port->max_transfer_size = vs_service_max_mbuf_size(port->service) - 8;
+	_state->packet_size = port->max_transfer_size;
+
+	return VS_SERVER_RESP_SUCCESS;
+}
+
+static vs_server_response_type_t
+vs_serial_server_req_close(struct vs_server_serial_state *_state)
+{
+	struct vtty_port *port = server_state_to_port(_state);
+
+	dev_dbg(&port->service->dev, "req_close\n");
+
+	return VS_SERVER_RESP_SUCCESS;
+}
+
+static struct vs_server_serial vs_server_serial_driver = {
+	.rx_atomic		= true,
+	.alloc			= vs_serial_server_alloc,
+	.release		= vs_serial_server_release,
+	.closed			= vs_serial_server_closed,
+	.open			= vs_serial_server_req_open,
+	.close			= vs_serial_server_req_close,
+	.serial = {
+		.msg_msg	= vs_serial_server_handle_message,
+	},
+
+	/* Large default quota for batching data messages */
+	.in_quota_best		= 16,
+	.out_quota_best		= 16,
+};
+
+static int __init vs_serial_server_init(void)
+{
+	return vservice_serial_server_register(&vs_server_serial_driver,
+			"vserial");
+}
+
+static void __exit vs_serial_server_exit(void)
+{
+	vservice_serial_server_unregister(&vs_server_serial_driver);
+}
+
+module_init(vs_serial_server_init);
+module_exit(vs_serial_server_exit);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Serial Server Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/char/vservices_serial.c b/drivers/char/vservices_serial.c
new file mode 100644
index 0000000..53ed9f0
--- /dev/null
+++ b/drivers/char/vservices_serial.c
@@ -0,0 +1,634 @@
+/*
+ * drivers/char/vservices_serial.c
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Serial vService core driver (shared by the client and server drivers)
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/bitmap.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/console.h>
+#include <linux/version.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/mutex.h>
+
+#include <vservices/transport.h>
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/protocol/serial/common.h>
+#include <vservices/protocol/serial/types.h>
+#include <vservices/protocol/serial/server.h>
+#include <vservices/service.h>
+#include <vservices/wait.h>
+
+#include "vs_serial_common.h"
+
+struct vtty_in_packet {
+	struct vs_pbuf	pbuf;
+	size_t		offset;
+};
+
+static int max_ttys = CONFIG_VSERVICES_VTTY_COUNT;
+static unsigned long *alloced_ttys;
+module_param(max_ttys, int, S_IRUGO);
+
+static struct tty_driver *vtty_driver;
+
+static DEFINE_MUTEX(tty_bitmap_lock);
+
+static struct vtty_port *dev_to_port(struct device *dev)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+#if defined(CONFIG_VSERVICES_SERIAL_SERVER) || \
+    defined(CONFIG_VSERVICES_SERIAL_SERVER_MODULE)
+	if (service->is_server) {
+		struct vs_server_serial_state *server = dev_get_drvdata(dev);
+		return container_of(server, struct vtty_port, u.vs_server);
+	}
+#endif
+#if defined(CONFIG_VSERVICES_SERIAL_CLIENT) || \
+    defined(CONFIG_VSERVICES_SERIAL_CLIENT_MODULE)
+	if (!service->is_server) {
+		struct vs_client_serial_state *client = dev_get_drvdata(dev);
+		return container_of(client, struct vtty_port, u.vs_client);
+	}
+#endif
+	/* should never get here */
+	WARN_ON(1);
+	return NULL;
+}
+
+static struct vtty_port *port_from_tty(struct tty_struct *tty)
+{
+	return dev_to_port(tty->dev->parent);
+}
+
+static int vtty_install(struct tty_driver *driver, struct tty_struct *tty)
+{
+	struct vtty_port *port;
+
+	if (tty->index < 0 || !test_bit(tty->index, alloced_ttys))
+		return -ENXIO;
+
+	port = port_from_tty(tty);
+
+	if (!port)
+		return -ENXIO;
+
+	tty->driver_data = port;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)
+	if (tty->port)
+		tty->port->low_latency = 0;
+#else
+	tty->low_latency = 0;
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
+	tty_port_install(&port->port, driver, tty);
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)
+	tty->port = &port->port;
+	tty_standard_install(driver, tty);
+#else
+	tty->port = &port->port;
+	if (tty_init_termios(tty) != 0)
+		return -ENOMEM;
+
+	tty_driver_kref_get(driver);
+	tty->count++;
+	driver->ttys[tty->index] = tty;
+#endif
+
+	return 0;
+}
+
+static int vtty_open(struct tty_struct *tty, struct file *file)
+{
+	struct vtty_port *port = tty->driver_data;
+	return tty_port_open(&port->port, tty, file);
+}
+
+static void vtty_close(struct tty_struct *tty, struct file *file)
+{
+	struct vtty_port *port = tty->driver_data;
+	if (port)
+		tty_port_close(&port->port, tty, file);
+}
+
+static void vtty_shutdown(struct tty_port *port)
+{
+	struct vtty_port *vtty_port =
+			container_of(port, struct vtty_port, port);
+
+	if (vtty_port->doing_release)
+		kfree(port);
+}
+
+static int vtty_write_room(struct tty_struct *tty)
+{
+	struct vtty_port *port = tty->driver_data;
+
+	return vs_service_send_mbufs_available(port->service) *
+			port->max_transfer_size;
+}
+
+static struct vs_mbuf *vserial_alloc_send_buffer(struct vtty_port *port,
+		const unsigned char *buf, size_t size, struct vs_pbuf *pbuf,
+		gfp_t gfp_flags)
+{
+	struct vs_mbuf *mbuf;
+	ssize_t ret;
+
+	mbuf = port->ops.alloc_msg_buf(port, pbuf, gfp_flags);
+	if (IS_ERR(mbuf)) {
+		ret = PTR_ERR(mbuf);
+		goto fail;
+	}
+
+	ret = vs_pbuf_resize(pbuf, size);
+	if (ret < (ssize_t)size)
+		goto fail_free_buf;
+
+	ret = vs_pbuf_copyin(pbuf, 0, buf, size);
+	if (ret < (ssize_t)size)
+		goto fail_free_buf;
+
+	return mbuf;
+
+fail_free_buf:
+	port->ops.free_msg_buf(port, mbuf, pbuf);
+fail:
+	return ERR_PTR(ret);
+}
+
+static int vtty_write(struct tty_struct *tty, const unsigned char *buf,
+		int count)
+{
+	struct vtty_port *port;
+	size_t sent_bytes = 0, size;
+	struct vs_mbuf *mbuf;
+	struct vs_pbuf pbuf;
+	int err;
+
+	if (WARN_ON(!tty || !buf))
+		return -EINVAL;
+
+	port = tty->driver_data;
+	if (!port->ops.is_running(port)) {
+		dev_dbg(&port->service->dev, "tty is not running!");
+		return 0;
+	}
+
+	/*
+	 * We need to break our message up into chunks of
+	 * port->max_transfer_size.
+	 */
+	dev_dbg(&port->service->dev, "Writing %d bytes\n", count);
+	while (sent_bytes < count) {
+		size = min_t(size_t, count - sent_bytes,
+				port->max_transfer_size);
+
+		/*
+		 * Passing &port->u.vs_client here works for both the client
+		 * and the server since vs_client and vs_server are in the
+		 * same union, and therefore have the same address.
+		 */
+		mbuf = vs_service_waiting_alloc(&port->u.vs_client,
+				vserial_alloc_send_buffer(port,
+				buf + sent_bytes, size, &pbuf, GFP_KERNEL));
+		if (IS_ERR(mbuf)) {
+			dev_err(&port->service->dev,
+					"Failed to alloc mbuf of %zu bytes: %ld - resetting service\n",
+					size, PTR_ERR(mbuf));
+			vs_service_reset(port->service, port->service);
+			return -EIO;
+		}
+
+		vs_service_state_lock(port->service);
+		err = port->ops.send_msg_buf(port, mbuf, &pbuf);
+		vs_service_state_unlock(port->service);
+		if (err) {
+			port->ops.free_msg_buf(port, mbuf, &pbuf);
+			dev_err(&port->service->dev,
+					"send failed: %d - resetting service\n",
+					err);
+			vs_service_reset(port->service, port->service);
+			return -EIO;
+		}
+
+		dev_dbg(&port->service->dev, "Sent %zu bytes (%zu/%d)\n",
+				size, sent_bytes + size, count);
+		sent_bytes += size;
+	}
+
+	dev_dbg(&port->service->dev, "Write complete - sent %zu/%d bytes\n",
+			sent_bytes, count);
+	return sent_bytes;
+}
+
+static int vtty_put_char(struct tty_struct *tty, unsigned char ch)
+{
+	return vtty_write(tty, &ch, 1);
+}
+
+static size_t vs_serial_send_pbuf_to_tty(struct vtty_port *port,
+		struct vs_pbuf *pbuf, size_t offset)
+{
+	struct tty_struct *tty = tty_port_tty_get(&port->port);
+	size_t space, size;
+
+	lockdep_assert_held(&port->in_lock);
+
+	size = vs_pbuf_size(pbuf) - offset;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)
+	space = tty_buffer_request_room(tty->port, size);
+#else
+	space = tty_buffer_request_room(tty, size);
+#endif
+	if (space) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)
+		tty_insert_flip_string(tty->port, pbuf->data + offset, space);
+		tty_flip_buffer_push(tty->port);
+#else
+		tty_insert_flip_string(tty, pbuf->data + offset, space);
+		tty_flip_buffer_push(tty);
+#endif
+	}
+
+	tty_kref_put(tty);
+
+	/* Return the number of bytes written */
+	return space;
+}
+
+static void vtty_throttle(struct tty_struct *tty)
+{
+	struct vtty_port *port = tty->driver_data;
+
+	dev_dbg(&port->service->dev, "throttle\n");
+
+	spin_lock_bh(&port->in_lock);
+	port->tty_canrecv = false;
+	spin_unlock_bh(&port->in_lock);
+}
+
+static void vtty_unthrottle(struct tty_struct *tty)
+{
+	struct vtty_port *port = tty->driver_data;
+	struct vtty_in_packet *packet;
+	struct vs_mbuf *mbuf;
+	size_t sent;
+
+	dev_dbg(&port->service->dev, "unthrottle\n");
+
+	spin_lock_bh(&port->in_lock);
+
+	while (!list_empty(&port->pending_in_packets)) {
+		mbuf = list_first_entry(&port->pending_in_packets,
+				struct vs_mbuf, queue);
+		packet = mbuf->priv;
+
+		sent = vs_serial_send_pbuf_to_tty(port, &packet->pbuf,
+				packet->offset);
+		packet->offset += sent;
+		if (packet->offset < vs_pbuf_size(&packet->pbuf)) {
+			/*
+			 * Only wrote part of the buffer. This means that we
+			 * still have pending data that cannot be written to
+			 * the tty at this time. The tty layer will rethrottle
+			 * and this function will be called again when the tty
+			 * layer is next able to handle data and we can write
+			 * the remainder of the buffer.
+			 */
+			dev_dbg(&port->service->dev,
+					"unthrottle: Only wrote %zu (%zu/%zu) bytes\n",
+					sent, packet->offset,
+					vs_pbuf_size(&packet->pbuf));
+			break;
+		}
+
+		dev_dbg(&port->service->dev,
+				"unthrottle: wrote %zu (%zu/%zu) bytes\n",
+				sent, packet->offset,
+				vs_pbuf_size(&packet->pbuf));
+
+		/* Wrote the whole buffer - free it */
+		list_del(&mbuf->queue);
+		port->ops.free_msg_buf(port, mbuf, &packet->pbuf);
+		kfree(packet);
+	}
+
+	port->tty_canrecv = true;
+	spin_unlock_bh(&port->in_lock);
+}
+
+static struct tty_port_operations vtty_port_ops = {
+	.shutdown	= vtty_shutdown,
+};
+
+static struct tty_operations vtty_ops = {
+	.install	= vtty_install,
+	.open		= vtty_open,
+	.close		= vtty_close,
+	.write		= vtty_write,
+	.write_room	= vtty_write_room,
+	.put_char	= vtty_put_char,
+	.throttle	= vtty_throttle,
+	.unthrottle	= vtty_unthrottle
+};
+
+static int vs_serial_queue_incoming_packet(struct vtty_port *port,
+		struct vs_mbuf *mbuf, struct vs_pbuf *pbuf, size_t offset)
+{
+	struct vtty_in_packet *packet;
+
+	lockdep_assert_held(&port->in_lock);
+
+	packet = kzalloc(sizeof(*packet), GFP_ATOMIC);
+	if (!packet) {
+		/*
+		 * Uh oh, we are seriously out of memory. The incoming data
+		 * will be lost.
+		 */
+		return -ENOMEM;
+	}
+
+	dev_dbg(&port->service->dev, "Queuing packet %zu bytes, offset %zu\n",
+			vs_pbuf_size(pbuf), offset);
+	mbuf->priv = packet;
+	memcpy(&packet->pbuf, pbuf, sizeof(*pbuf));
+	packet->offset = offset;
+
+	list_add_tail(&mbuf->queue, &port->pending_in_packets);
+	return 0;
+}
+
+int vs_serial_handle_message(struct vtty_port *port, struct vs_mbuf *mbuf,
+		struct vs_pbuf *pbuf)
+{
+	struct tty_struct *tty = tty_port_tty_get(&port->port);
+	bool queue_packet = false;
+	size_t sent = 0;
+	int err;
+
+	if (!tty) {
+		dev_dbg(&port->service->dev,
+				"tty not open. Dropping %zu chars\n",
+				pbuf->size);
+		port->ops.free_msg_buf(port, mbuf, pbuf);
+		return 0;
+	}
+
+	dev_dbg(&port->service->dev, "Incoming message - len = %zu\n",
+			pbuf->size);
+
+	spin_lock(&port->in_lock);
+	if (!port->tty_canrecv || !list_empty(&port->pending_in_packets)) {
+		/*
+		 * We cannot send to the tty right now, either because we are
+		 * being throttled or because we still have pending data
+		 * to write out to the tty. Queue the buffer up so we can
+		 * write it later.
+		 */
+		dev_dbg(&port->service->dev,
+				"Cannot send (canrecv = %d, queued = %d) - queuing message\n",
+				port->tty_canrecv,
+				!list_empty(&port->pending_in_packets));
+		queue_packet = true;
+
+	} else {
+		sent = vs_serial_send_pbuf_to_tty(port, pbuf, 0);
+		if (sent < vs_pbuf_size(pbuf)) {
+			/*
+			 * Only wrote part of the buffer to the tty. Queue
+			 * the buffer to write the rest.
+			 */
+			dev_dbg(&port->service->dev,
+					"Sent %zu/%zu bytes to tty - queueing rest\n",
+					sent, vs_pbuf_size(pbuf));
+			queue_packet = true;
+		}
+	}
+
+	if (queue_packet) {
+		/*
+		 * Queue the incoming data up. If we are not already throttled,
+		 * the tty layer will do so now since it has no room in its
+		 * buffers.
+		 */
+		err = vs_serial_queue_incoming_packet(port, mbuf, pbuf, sent);
+		if (err) {
+			dev_err(&port->service->dev,
+					"Failed to queue packet - dropping chars\n");
+			port->ops.free_msg_buf(port, mbuf, pbuf);
+		}
+
+	} else {
+		port->ops.free_msg_buf(port, mbuf, pbuf);
+	}
+
+	spin_unlock(&port->in_lock);
+	tty_kref_put(tty);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vs_serial_handle_message);
+
+#ifdef CONFIG_OKL4_VTTY_CONSOLE
+static int vconsole_setup(struct console *co, char *options)
+{
+	if (co->index < 0 || co->index >= max_ttys)
+		co->index = 0;
+
+	pr_info("OKL4 virtual console init\n");
+
+	return 0;
+}
+
+static void vconsole_write(struct console *co, const char *p, unsigned count)
+{
+}
+
+static struct tty_driver *vconsole_device(struct console *co, int *index)
+{
+	*index = co->index;
+
+	return vtty_driver;
+}
+#endif /* CONFIG_OKL4_VTTY_CONSOLE */
+
+static void vs_serial_free_buffers(struct vtty_port *port)
+{
+	struct vtty_in_packet *packet;
+	struct vs_mbuf *mbuf;
+
+	/* Free the list of incoming buffers */
+	spin_lock_bh(&port->in_lock);
+	while (!list_empty(&port->pending_in_packets)) {
+		mbuf = list_first_entry(&port->pending_in_packets,
+				struct vs_mbuf, queue);
+		packet = mbuf->priv;
+
+		list_del(&mbuf->queue);
+		port->ops.free_msg_buf(port, mbuf, &packet->pbuf);
+		kfree(packet);
+	}
+	spin_unlock_bh(&port->in_lock);
+}
+
+/* vservices callbacks */
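+/*
+ * Allocate a vtty port: claim a free port number from the bitmap, register
+ * the corresponding tty device and, when CONFIG_OKL4_VTTY_CONSOLE is enabled,
+ * its console.
+ */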
+struct vtty_port *vs_serial_alloc_port(struct vs_service_device *service,
+		struct vtty_port_ops *port_ops)
+{
+	struct vtty_port *port;
+	int port_num;
+
+	mutex_lock(&tty_bitmap_lock);
+	port_num = find_first_zero_bit(alloced_ttys, max_ttys);
+
+	if (port_num >= max_ttys) {
+		mutex_unlock(&tty_bitmap_lock);
+		return NULL;
+	}
+
+	port = kzalloc(sizeof(struct vtty_port), GFP_KERNEL);
+	if (!port) {
+		mutex_unlock(&tty_bitmap_lock);
+		return NULL;
+	}
+
+	port->service = service;
+	port->ops = *port_ops;
+	port->tty_canrecv = true;
+	port->port_num = port_num;
+	INIT_LIST_HEAD(&port->pending_in_packets);
+	spin_lock_init(&port->in_lock);
+#ifdef CONFIG_OKL4_VTTY_CONSOLE
+	/* Set up and register the port's console device */
+	strlcpy(port->console.name, "vconvs", sizeof(port->console.name));
+	port->console.write = vconsole_write;
+	port->console.flags = CON_PRINTBUFFER;
+	port->console.device = vconsole_device;
+	port->console.setup = vconsole_setup;
+	port->console.index = port_num;
+
+	register_console(&port->console);
+#endif
+	port->vtty_driver = vtty_driver;
+
+	tty_port_init(&port->port);
+	port->port.ops = &vtty_port_ops;
+
+	tty_register_device(vtty_driver, port_num, &service->dev);
+	bitmap_set(alloced_ttys, port_num, 1);
+	mutex_unlock(&tty_bitmap_lock);
+
+	return port;
+}
+EXPORT_SYMBOL(vs_serial_alloc_port);
+
+void vs_serial_release(struct vtty_port *port)
+{
+	dev_dbg(&port->service->dev, "Release\n");
+
+#ifdef CONFIG_OKL4_VTTY_CONSOLE
+	unregister_console(&port->console);
+#endif
+
+	mutex_lock(&tty_bitmap_lock);
+	bitmap_clear(alloced_ttys, port->port_num, 1);
+	mutex_unlock(&tty_bitmap_lock);
+
+	if (port->port.tty) {
+		tty_vhangup(port->port.tty);
+		tty_kref_put(port->port.tty);
+	}
+
+	vs_serial_free_buffers(port);
+	port->doing_release = true;
+	tty_unregister_device(vtty_driver, port->port_num);
+}
+EXPORT_SYMBOL_GPL(vs_serial_release);
+
+void vs_serial_reset(struct vtty_port *port)
+{
+	/* Free list of in and out mbufs. */
+	vs_serial_free_buffers(port);
+}
+EXPORT_SYMBOL_GPL(vs_serial_reset);
+
+static int __init vs_serial_init(void)
+{
+	int err;
+
+	if (max_ttys == 0)
+		return -EINVAL;
+
+	alloced_ttys = kcalloc(BITS_TO_LONGS(max_ttys), sizeof(unsigned long),
+			GFP_KERNEL);
+	if (!alloced_ttys) {
+		err = -ENOMEM;
+		goto fail_alloc_ttys;
+	}
+
+	/* Set up the tty driver. */
+	vtty_driver = alloc_tty_driver(max_ttys);
+	if (!vtty_driver) {
+		err = -ENOMEM;
+		goto fail_alloc_tty_driver;
+	}
+
+	vtty_driver->owner = THIS_MODULE;
+	vtty_driver->driver_name = "okl4-vservices-serial";
+	vtty_driver->name = "ttyVS";
+	vtty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+	vtty_driver->subtype = SERIAL_TYPE_NORMAL;
+	vtty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+	vtty_driver->init_termios = tty_std_termios;
+	vtty_driver->num = max_ttys;
+
+	/* These flags don't really matter; just use sensible defaults. */
+	vtty_driver->init_termios.c_cflag =
+			B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+	vtty_driver->init_termios.c_ispeed = 9600;
+	vtty_driver->init_termios.c_ospeed = 9600;
+
+	tty_set_operations(vtty_driver, &vtty_ops);
+
+	err = tty_register_driver(vtty_driver);
+	if (err)
+		goto fail_tty_driver_register;
+
+	return 0;
+
+fail_tty_driver_register:
+	put_tty_driver(vtty_driver);
+fail_alloc_tty_driver:
+	kfree(alloced_ttys);
+fail_alloc_ttys:
+	return err;
+}
+
+static void __exit vs_serial_exit(void)
+{
+	tty_unregister_driver(vtty_driver);
+	put_tty_driver(vtty_driver);
+	kfree(alloced_ttys);
+}
+
+module_init(vs_serial_init);
+module_exit(vs_serial_exit);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Serial Core Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 56ac166..1243e39 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -337,3 +337,13 @@
 	  Support for the debug clock controller on Qualcomm Technologies, Inc
 	  KONA devices.
 	  Say Y if you want to support the clock measurement functionality.
+
+config MSM_NPUCC_KONA
+	tristate "KONA NPU Clock Controller"
+	depends on COMMON_CLK_QCOM
+	select MSM_GCC_KONA
+	help
+	  Support for the NPU clock controller on Qualcomm Technologies, Inc.
+	  KONA devices.
+	  Say Y if you want to enable the clocks required to use the Network
+	  Processing Unit (NPU), which accelerates certain types of
+	  computation.
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index fc85ab9..8104446 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -42,6 +42,7 @@
 obj-$(CONFIG_MSM_MMCC_8960) += mmcc-msm8960.o
 obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o
 obj-$(CONFIG_MSM_MMCC_8996) += mmcc-msm8996.o
+obj-$(CONFIG_MSM_NPUCC_KONA) += npucc-kona.o
 obj-$(CONFIG_MSM_VIDEOCC_KONA) += videocc-kona.o
 obj-$(CONFIG_QCOM_A53PLL) += a53-pll.o
 obj-$(CONFIG_QCOM_CLK_APCS_MSM8916) += apcs-msm8916.o
diff --git a/drivers/clk/qcom/clk-aop-qmp.c b/drivers/clk/qcom/clk-aop-qmp.c
index f813f46..3d69dc9 100644
--- a/drivers/clk/qcom/clk-aop-qmp.c
+++ b/drivers/clk/qcom/clk-aop-qmp.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt) "%s: " fmt, __func__
@@ -129,6 +129,12 @@
 	struct clk_aop_qmp *clk = to_aop_qmp_clk(hw);
 
 	mutex_lock(&clk_aop_lock);
+	/*
+	 * Return early if the clock has been enabled already. This
+	 * is to avoid issues with sending duplicate enable requests.
+	 */
+	if (clk->enabled)
+		goto err;
 
 	if (clk->level)
 		rate = clk->level;
@@ -143,7 +149,8 @@
 	ret = mbox_send_message(clk->mbox, &pkt);
 	if (ret < 0) {
 		pr_err("Failed to send clk prepare request for %s, ret %d\n",
-					clk_hw_get_name(hw), ret);
+				hw->core ? clk_hw_get_name(hw) : hw->init->name,
+					ret);
 		goto err;
 	}
 
@@ -167,6 +174,9 @@
 
 	mutex_lock(&clk_aop_lock);
 
+	if (!clk->enabled)
+		goto err;
+
 	rate = clk->disable_state;
 
 	snprintf(mbox_msg, MAX_LEN, "{class: %s, res: %s, val: %ld}",
@@ -213,9 +223,10 @@
 };
 
 static int qmp_update_client(struct clk_hw *hw, struct device *dev,
-		struct mbox_chan *mbox)
+		struct mbox_chan **mbox)
 {
 	struct clk_aop_qmp *clk_aop = to_aop_qmp_clk(hw);
+	int ret;
 
 	/* Use mailbox client with blocking mode */
 	clk_aop->cl.dev = dev;
@@ -223,17 +234,19 @@
 	clk_aop->cl.tx_tout = MBOX_TOUT_MS;
 	clk_aop->cl.knows_txdone = false;
 
-	if (mbox) {
-		clk_aop->mbox = mbox;
+	if (*mbox) {
+		clk_aop->mbox = *mbox;
 		return 0;
 	}
 
 	/* Allocate mailbox channel */
-	mbox = clk_aop->mbox = mbox_request_channel(&clk_aop->cl, 0);
-	if (IS_ERR(clk_aop->mbox) && PTR_ERR(clk_aop->mbox) != -EPROBE_DEFER) {
-		dev_err(dev, "Failed to get mailbox channel %pK %ld\n",
-						mbox, PTR_ERR(mbox));
-		return PTR_ERR(clk_aop->mbox);
+	*mbox = clk_aop->mbox = mbox_request_channel(&clk_aop->cl, 0);
+	if (IS_ERR(clk_aop->mbox)) {
+		ret = PTR_ERR(clk_aop->mbox);
+		if (ret != -EPROBE_DEFER)
+			dev_err(dev, "Failed to get mailbox channel, ret %d\n",
+				ret);
+		return ret;
 	}
 
 	return 0;
@@ -244,6 +257,7 @@
 	struct clk *clk = NULL;
 	struct device_node *np = pdev->dev.of_node;
 	struct mbox_chan *mbox = NULL;
+	struct clk_onecell_data *clk_data;
 	int num_clks = ARRAY_SIZE(aop_qmp_clk_hws);
 	int ret = 0, i = 0;
 
@@ -251,14 +265,25 @@
 	 * Allocate mbox channel for the first clock client. The same channel
 	 * would be used for the rest of the clock clients.
 	 */
-	ret = qmp_update_client(aop_qmp_clk_hws[i], &pdev->dev, mbox);
+	ret = qmp_update_client(aop_qmp_clk_hws[i], &pdev->dev, &mbox);
 	if (ret < 0)
 		return ret;
 
+	clk_data = devm_kzalloc(&pdev->dev, sizeof(*clk_data), GFP_KERNEL);
+	if (!clk_data)
+		return -ENOMEM;
+
+	clk_data->clks = devm_kcalloc(&pdev->dev, num_clks,
+					sizeof(*clk_data->clks), GFP_KERNEL);
+	if (!clk_data->clks)
+		return -ENOMEM;
+
+	clk_data->clk_num = num_clks;
+
 	for (i = 1; i < num_clks; i++) {
 		if (!aop_qmp_clk_hws[i])
 			continue;
-		ret = qmp_update_client(aop_qmp_clk_hws[i], &pdev->dev, mbox);
+		ret = qmp_update_client(aop_qmp_clk_hws[i], &pdev->dev, &mbox);
 		if (ret < 0) {
 			dev_err(&pdev->dev, "Failed to update QMP client %d\n",
 							ret);
@@ -277,14 +302,16 @@
 	for (i = 0; i < num_clks; i++) {
 		if (!aop_qmp_clk_hws[i])
 			continue;
+
 		clk = devm_clk_register(&pdev->dev, aop_qmp_clk_hws[i]);
 		if (IS_ERR(clk)) {
 			ret = PTR_ERR(clk);
 			goto fail;
 		}
+		clk_data->clks[i] = clk;
 	}
 
-	ret = of_clk_add_provider(np, of_clk_src_simple_get, clk);
+	ret = of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
 	if (ret) {
 		dev_err(&pdev->dev, "Failed to register clock provider\n");
 		goto fail;
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index 9ad0efb..27bce32 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -171,6 +171,7 @@
 extern const struct clk_ops clk_gfx3d_ops;
 extern const struct clk_ops clk_rcg2_shared_ops;
 extern const struct clk_ops clk_dp_ops;
+extern const struct clk_ops clk_rcg2_dependent_ops;
 
 struct clk_rcg_dfs_data {
 	struct clk_rcg2 *rcg;
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 25a13f8..a31079c 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -1497,3 +1497,104 @@
 	return 0;
 }
 EXPORT_SYMBOL_GPL(qcom_cc_register_rcg_dfs);
+
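+/*
+ * "Dependent" RCG ops mirror every prepare/enable/parent/rate operation onto
+ * a second RCG (clkr.dependent_hw) so that the two clocks are always
+ * configured in lockstep, e.g. when both are sourced from the same slewing
+ * PLL.
+ */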
+static int clk_rcg2_dependent_enable(struct clk_hw *hw)
+{
+	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+	int ret;
+
+	ret = clk_rcg2_enable(hw);
+	if (ret < 0)
+		return ret;
+
+	ret = clk_enable(rcg->clkr.dependent_hw->clk);
+	if (ret < 0)
+		clk_rcg2_disable(hw);
+
+	return ret;
+}
+
+static void clk_rcg2_dependent_disable(struct clk_hw *hw)
+{
+	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+	clk_rcg2_disable(hw);
+	clk_disable(rcg->clkr.dependent_hw->clk);
+}
+
+static int clk_rcg2_dependent_set_parent(struct clk_hw *hw, u8 index)
+{
+	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+	struct clk_hw *p_hw;
+	int ret;
+
+	ret = clk_rcg2_set_parent(hw, index);
+	if (ret < 0)
+		return ret;
+
+	p_hw = clk_hw_get_parent_by_index(rcg->clkr.dependent_hw, index);
+	return clk_set_parent(rcg->clkr.dependent_hw->clk, p_hw->clk);
+}
+
+static int clk_rcg2_dependent_determine_rate(struct clk_hw *hw,
+						 struct clk_rate_request *req)
+{
+	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+	__clk_determine_rate(rcg->clkr.dependent_hw, req);
+
+	return clk_rcg2_determine_rate(hw, req);
+}
+
+static int clk_rcg2_dependent_set_rate(struct clk_hw *hw,
+				unsigned long rate, unsigned long parent_rate)
+{
+	int ret;
+	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+	ret = clk_rcg2_set_rate(hw, rate, parent_rate);
+	if (ret < 0)
+		return ret;
+
+	return clk_set_rate(rcg->clkr.dependent_hw->clk, rate);
+}
+
+static int clk_rcg2_dependent_set_rate_and_parent(struct clk_hw *hw,
+		unsigned long rate, unsigned long parent_rate, u8 index)
+{
+	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+	int ret;
+
+	ret = clk_rcg2_set_rate_and_parent(hw, rate, parent_rate, index);
+	if (ret < 0)
+		return ret;
+
+	return clk_set_rate(rcg->clkr.dependent_hw->clk, rate);
+}
+
+static int clk_rcg2_dependent_prepare(struct clk_hw *hw)
+{
+	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+	return clk_prepare(rcg->clkr.dependent_hw->clk);
+}
+
+static void clk_rcg2_dependent_unprepare(struct clk_hw *hw)
+{
+	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+	clk_unprepare(rcg->clkr.dependent_hw->clk);
+}
+
+const struct clk_ops clk_rcg2_dependent_ops = {
+	.is_enabled = clk_rcg2_is_enabled,
+	.prepare = clk_rcg2_dependent_prepare,
+	.unprepare = clk_rcg2_dependent_unprepare,
+	.enable = clk_rcg2_dependent_enable,
+	.disable = clk_rcg2_dependent_disable,
+	.get_parent = clk_rcg2_get_parent,
+	.set_parent = clk_rcg2_dependent_set_parent,
+	.recalc_rate = clk_rcg2_recalc_rate,
+	.determine_rate = clk_rcg2_dependent_determine_rate,
+	.set_rate = clk_rcg2_dependent_set_rate,
+	.set_rate_and_parent = clk_rcg2_dependent_set_rate_and_parent,
+	.list_rate = clk_rcg2_list_rate,
+	.list_registers = clk_rcg2_list_registers,
+};
+EXPORT_SYMBOL(clk_rcg2_dependent_ops);
diff --git a/drivers/clk/qcom/clk-regmap.h b/drivers/clk/qcom/clk-regmap.h
index 115a937..05ce561 100644
--- a/drivers/clk/qcom/clk-regmap.h
+++ b/drivers/clk/qcom/clk-regmap.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2014, 2017, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2014, 2017-2018, The Linux Foundation. All rights reserved. */
 
 #ifndef __QCOM_CLK_REGMAP_H__
 #define __QCOM_CLK_REGMAP_H__
@@ -12,6 +12,7 @@
 /**
  * struct clk_regmap - regmap supporting clock
  * @hw:		handle between common and hardware-specific interfaces
+ * @dependent_hw: clk_hw of a dependent clock that must be kept in lockstep
  * @regmap:	regmap to use for regmap helpers and/or by providers
  * @enable_reg: register when using regmap enable/disable ops
  * @enable_mask: mask when using regmap enable/disable ops
@@ -20,6 +21,7 @@
  */
 struct clk_regmap {
 	struct clk_hw hw;
+	struct clk_hw *dependent_hw;
 	struct regmap *regmap;
 	unsigned int enable_reg;
 	unsigned int enable_mask;
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index 9f4fc77..d7f3b9e 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/clk-provider.h>
@@ -36,6 +36,7 @@
 	struct clk_hw hw;
 	const char *res_name;
 	u8 div;
+	bool optional;
 	u32 res_addr;
 	u32 res_on_val;
 	u32 state;
@@ -54,13 +55,14 @@
 static DEFINE_MUTEX(rpmh_clk_lock);
 
 #define __DEFINE_CLK_RPMH(_platform, _name, _name_active, _res_name,	\
-			  _res_en_offset, _res_on, _div)		\
+			  _res_en_offset, _res_on, _div, _optional)	\
 	static struct clk_rpmh _platform##_##_name_active;		\
 	static struct clk_rpmh _platform##_##_name = {			\
 		.res_name = _res_name,					\
 		.res_addr = _res_en_offset,				\
 		.res_on_val = _res_on,					\
 		.div = _div,						\
+		.optional = _optional,					\
 		.peer = &_platform##_##_name_active,			\
 		.valid_state_mask = (BIT(RPMH_WAKE_ONLY_STATE) |	\
 				      BIT(RPMH_ACTIVE_ONLY_STATE) |	\
@@ -77,6 +79,7 @@
 		.res_addr = _res_en_offset,				\
 		.res_on_val = _res_on,					\
 		.div = _div,						\
+		.optional = _optional,					\
 		.peer = &_platform##_##_name,				\
 		.valid_state_mask = (BIT(RPMH_WAKE_ONLY_STATE) |	\
 					BIT(RPMH_ACTIVE_ONLY_STATE)),	\
@@ -91,12 +94,19 @@
 #define DEFINE_CLK_RPMH_ARC(_platform, _name, _name_active, _res_name,	\
 			    _res_on, _div)				\
 	__DEFINE_CLK_RPMH(_platform, _name, _name_active, _res_name,	\
-			  CLK_RPMH_ARC_EN_OFFSET, _res_on, _div)
+			  CLK_RPMH_ARC_EN_OFFSET, _res_on, _div, false)
 
 #define DEFINE_CLK_RPMH_VRM(_platform, _name, _name_active, _res_name,	\
 				_div)					\
 	__DEFINE_CLK_RPMH(_platform, _name, _name_active, _res_name,	\
-			  CLK_RPMH_VRM_EN_OFFSET, 1, _div)
+			  CLK_RPMH_VRM_EN_OFFSET, 1, _div, false)
+
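+/*
+ * Optional (_OPT) clocks may be absent from the command DB on some targets;
+ * probe skips them instead of failing when their resource is not found.
+ */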
+#define DEFINE_CLK_RPMH_VRM_OPT(_platform, _name, _name_active,		\
+			_res_name, _div)				\
+	__DEFINE_CLK_RPMH(_platform, _name, _name_active, _res_name,	\
+			  CLK_RPMH_VRM_EN_OFFSET, 1, _div, true)
 
 static inline struct clk_rpmh *to_clk_rpmh(struct clk_hw *_hw)
 {
@@ -238,17 +248,58 @@
 	.num_clks = ARRAY_SIZE(sdm845_rpmh_clocks),
 };
 
+DEFINE_CLK_RPMH_ARC(kona, bi_tcxo, bi_tcxo_ao, "xo.lvl", 0x3, 2);
+DEFINE_CLK_RPMH_VRM(kona, ln_bb_clk1, ln_bb_clk1_ao, "lnbclka1", 2);
+DEFINE_CLK_RPMH_VRM(kona, ln_bb_clk2, ln_bb_clk2_ao, "lnbclka2", 2);
+DEFINE_CLK_RPMH_VRM(kona, ln_bb_clk3, ln_bb_clk3_ao, "lnbclka3", 2);
+DEFINE_CLK_RPMH_VRM(kona, rf_clk1, rf_clk1_ao, "rfclka1", 1);
+DEFINE_CLK_RPMH_VRM(kona, rf_clk3, rf_clk3_ao, "rfclka3", 1);
+DEFINE_CLK_RPMH_VRM_OPT(kona, rf_clkd3, rf_clkd3_ao, "rfclkd3", 1);
+DEFINE_CLK_RPMH_VRM_OPT(kona, rf_clkd4, rf_clkd4_ao, "rfclkd4", 1);
+
+static struct clk_hw *kona_rpmh_clocks[] = {
+	[RPMH_CXO_CLK]		= &kona_bi_tcxo.hw,
+	[RPMH_CXO_CLK_A]	= &kona_bi_tcxo_ao.hw,
+	[RPMH_LN_BB_CLK1]	= &kona_ln_bb_clk1.hw,
+	[RPMH_LN_BB_CLK1_A]	= &kona_ln_bb_clk1_ao.hw,
+	[RPMH_LN_BB_CLK2]	= &kona_ln_bb_clk2.hw,
+	[RPMH_LN_BB_CLK2_A]	= &kona_ln_bb_clk2_ao.hw,
+	[RPMH_LN_BB_CLK3]	= &kona_ln_bb_clk3.hw,
+	[RPMH_LN_BB_CLK3_A]	= &kona_ln_bb_clk3_ao.hw,
+	[RPMH_RF_CLK1]		= &kona_rf_clk1.hw,
+	[RPMH_RF_CLK1_A]	= &kona_rf_clk1_ao.hw,
+	[RPMH_RF_CLK3]		= &kona_rf_clk3.hw,
+	[RPMH_RF_CLK3_A]	= &kona_rf_clk3_ao.hw,
+	[RPMH_RF_CLKD3]		= &kona_rf_clkd3.hw,
+	[RPMH_RF_CLKD3_A]	= &kona_rf_clkd3_ao.hw,
+	[RPMH_RF_CLKD4]		= &kona_rf_clkd4.hw,
+	[RPMH_RF_CLKD4_A]	= &kona_rf_clkd4_ao.hw,
+};
+
+static const struct clk_rpmh_desc clk_rpmh_kona = {
+	.clks = kona_rpmh_clocks,
+	.num_clks = ARRAY_SIZE(kona_rpmh_clocks),
+};
+
 static struct clk_hw *of_clk_rpmh_hw_get(struct of_phandle_args *clkspec,
 					 void *data)
 {
 	struct clk_rpmh_desc *rpmh = data;
 	unsigned int idx = clkspec->args[0];
+	struct clk_rpmh *c;
 
 	if (idx >= rpmh->num_clks) {
 		pr_err("%s: invalid index %u\n", __func__, idx);
 		return ERR_PTR(-EINVAL);
 	}
 
+	if (!rpmh->clks[idx])
+		return ERR_PTR(-ENOENT);
+
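+	/*
+	 * Optional clocks whose RPMh resource was not found at probe keep a
+	 * zero res_addr; treat them as unavailable.
+	 */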
+	c = to_clk_rpmh(rpmh->clks[idx]);
+	if (!c->res_addr)
+		return ERR_PTR(-ENODEV);
+
 	return rpmh->clks[idx];
 }
 
@@ -268,9 +319,14 @@
 	for (i = 0; i < desc->num_clks; i++) {
 		u32 res_addr;
 
+		if (!hw_clks[i])
+			continue;
+
 		rpmh_clk = to_clk_rpmh(hw_clks[i]);
 		res_addr = cmd_db_read_addr(rpmh_clk->res_name);
 		if (!res_addr) {
+			if (rpmh_clk->optional)
+				continue;
 			dev_err(&pdev->dev, "missing RPMh resource address for %s\n",
 				rpmh_clk->res_name);
 			return -ENODEV;
@@ -301,6 +357,7 @@
 
 static const struct of_device_id clk_rpmh_match_table[] = {
 	{ .compatible = "qcom,sdm845-rpmh-clk", .data = &clk_rpmh_sdm845},
+	{ .compatible = "qcom,kona-rpmh-clk", .data = &clk_rpmh_kona},
 	{ }
 };
 MODULE_DEVICE_TABLE(of, clk_rpmh_match_table);
diff --git a/drivers/clk/qcom/debugcc-kona.c b/drivers/clk/qcom/debugcc-kona.c
index 61afabd..e50a9d7 100644
--- a/drivers/clk/qcom/debugcc-kona.c
+++ b/drivers/clk/qcom/debugcc-kona.c
@@ -963,6 +963,10 @@
 	if (ret)
 		return ret;
 
+	ret = map_debug_bases(pdev, "qcom,npucc", NPU_CC);
+	if (ret)
+		return ret;
+
 	clk = devm_clk_register(&pdev->dev, &gcc_debug_mux.hw);
 	if (IS_ERR(clk)) {
 		dev_err(&pdev->dev, "Unable to register GCC debug mux\n");
diff --git a/drivers/clk/qcom/npucc-kona.c b/drivers/clk/qcom/npucc-kona.c
new file mode 100644
index 0000000..85e7774
--- /dev/null
+++ b/drivers/clk/qcom/npucc-kona.c
@@ -0,0 +1,1257 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. */
+
+#define pr_fmt(fmt) "clk: %s: " fmt, __func__
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+#include <linux/string.h>
+
+#include <dt-bindings/clock/qcom,npucc-kona.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "reset.h"
+#include "vdd-level.h"
+
+static DEFINE_VDD_REGULATORS(vdd_cx, VDD_NUM, 1, vdd_corner);
+
+#define HM0_CRC_SID_FSM_CTRL		0x11A0
+#define HM1_CRC_SID_FSM_CTRL		0x11B0
+#define CRC_SID_FSM_CTRL_SETTING	0x800000
+#define HM0_CRC_MND_CFG			0x11A4
+#define HM1_CRC_MND_CFG			0x11B4
+#define CRC_MND_CFG_SETTING		0x15011
+
+enum {
+	P_BI_TCXO,
+	P_CORE_BI_PLL_TEST_SE,
+	P_GCC_NPU_GPLL0_CLK,
+	P_GCC_NPU_GPLL0_DIV_CLK,
+	P_NPU_CC_PLL0_OUT_EVEN,
+	P_NPU_CC_PLL1_OUT_EVEN,
+	P_NPU_Q6SS_PLL_OUT_MAIN,
+	P_NPU_CC_CRC_DIV,
+};
+
+static const struct parent_map npu_cc_parent_map_0[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_NPU_CC_PLL1_OUT_EVEN, 1 },
+	{ P_NPU_CC_PLL0_OUT_EVEN, 2 },
+	{ P_GCC_NPU_GPLL0_CLK, 4 },
+	{ P_GCC_NPU_GPLL0_DIV_CLK, 5 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const npu_cc_parent_names_0[] = {
+	"bi_tcxo",
+	"npu_cc_pll1_out_even",
+	"npu_cc_pll0_out_even",
+	"gcc_npu_gpll0_clk_src",
+	"gcc_npu_gpll0_div_clk_src",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map npu_cc_parent_map_0_crc[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_NPU_CC_PLL1_OUT_EVEN, 1 },
+	{ P_NPU_CC_CRC_DIV, 2 },
+	{ P_GCC_NPU_GPLL0_CLK, 4 },
+	{ P_GCC_NPU_GPLL0_DIV_CLK, 5 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const npu_cc_parent_names_0_crc[] = {
+	"bi_tcxo",
+	"npu_cc_pll1_out_even",
+	"npu_cc_crc_div",
+	"gcc_npu_gpll0_clk_src",
+	"gcc_npu_gpll0_div_clk_src",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map npu_cc_parent_map_1[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const npu_cc_parent_names_1_ao[] = {
+	"bi_tcxo_ao",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map npu_cc_parent_map_2[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_NPU_Q6SS_PLL_OUT_MAIN, 1 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const npu_cc_parent_names_2[] = {
+	"bi_tcxo",
+	"npu_q6ss_pll",
+	"core_bi_pll_test_se",
+};
+
+static struct pll_vco lucid_vco[] = {
+	{ 249600000, 2000000000, 0 },
+};
+
+static const struct alpha_pll_config npu_cc_pll0_config = {
+	.l = 0x14,
+	.cal_l = 0x44,
+	.alpha = 0xD555,
+	.config_ctl_val = 0x20485699,
+	.config_ctl_hi_val = 0x00002261,
+	.config_ctl_hi1_val = 0x029A699C,
+	.user_ctl_val = 0x00000000,
+	.user_ctl_hi_val = 0x00000805,
+	.user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll npu_cc_pll0 = {
+	.offset = 0x0,
+	.vco_table = lucid_vco,
+	.num_vco = ARRAY_SIZE(lucid_vco),
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_pll0",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_lucid_ops,
+			.vdd_class = &vdd_cx,
+			.num_rate_max = VDD_NUM,
+			.rate_max = (unsigned long[VDD_NUM]) {
+				[VDD_MIN] = 615000000,
+				[VDD_LOW] = 1066000000,
+				[VDD_LOW_L1] = 1600000000,
+				[VDD_NOMINAL] = 2000000000},
+		},
+	},
+};
+
+static const struct clk_div_table post_div_table_npu_cc_pll0_out_even[] = {
+	{ 0x0, 1 },
+	{ }
+};
+
+static struct clk_alpha_pll_postdiv npu_cc_pll0_out_even = {
+	.offset = 0x0,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_npu_cc_pll0_out_even,
+	.num_post_div = ARRAY_SIZE(post_div_table_npu_cc_pll0_out_even),
+	.width = 4,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "npu_cc_pll0_out_even",
+		.parent_names = (const char *[]){ "npu_cc_pll0" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_alpha_pll_postdiv_lucid_ops,
+	},
+};
+
+static const struct alpha_pll_config npu_cc_pll1_config = {
+	.l = 0x4E,
+	.cal_l = 0x44,
+	.alpha = 0x2000,
+	.config_ctl_val = 0x20485699,
+	.config_ctl_hi_val = 0x00002261,
+	.config_ctl_hi1_val = 0x029A699C,
+	.user_ctl_val = 0x00000000,
+	.user_ctl_hi_val = 0x00000805,
+	.user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll npu_cc_pll1 = {
+	.offset = 0x400,
+	.vco_table = lucid_vco,
+	.num_vco = ARRAY_SIZE(lucid_vco),
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_pll1",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_lucid_ops,
+			.vdd_class = &vdd_cx,
+			.num_rate_max = VDD_NUM,
+			.rate_max = (unsigned long[VDD_NUM]) {
+				[VDD_MIN] = 615000000,
+				[VDD_LOW] = 1066000000,
+				[VDD_LOW_L1] = 1600000000,
+				[VDD_NOMINAL] = 2000000000},
+		},
+	},
+};
+
+static const struct clk_div_table post_div_table_npu_cc_pll1_out_even[] = {
+	{ 0x0, 1 },
+	{ }
+};
+
+static struct clk_alpha_pll_postdiv npu_cc_pll1_out_even = {
+	.offset = 0x400,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_npu_cc_pll1_out_even,
+	.num_post_div = ARRAY_SIZE(post_div_table_npu_cc_pll1_out_even),
+	.width = 4,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "npu_cc_pll1_out_even",
+		.parent_names = (const char *[]){ "npu_cc_pll1" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_alpha_pll_postdiv_lucid_ops,
+	},
+};
+
+static const struct alpha_pll_config npu_q6ss_pll_config = {
+	.l = 0xD,
+	.cal_l = 0x44,
+	.alpha = 0x555,
+	.config_ctl_val = 0x20485699,
+	.config_ctl_hi_val = 0x00002261,
+	.config_ctl_hi1_val = 0x029A699C,
+	.user_ctl_val = 0x00000000,
+	.user_ctl_hi_val = 0x00000805,
+	.user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll npu_q6ss_pll = {
+	.offset = 0x0,
+	.vco_table = lucid_vco,
+	.num_vco = ARRAY_SIZE(lucid_vco),
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_q6ss_pll",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_lucid_ops,
+			.vdd_class = &vdd_cx,
+			.num_rate_max = VDD_NUM,
+			.rate_max = (unsigned long[VDD_NUM]) {
+				[VDD_MIN] = 615000000,
+				[VDD_LOW] = 1066000000,
+				[VDD_LOW_L1] = 1600000000,
+				[VDD_NOMINAL] = 2000000000},
+		},
+	},
+};
+
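+/* Fixed /2 divider on npu_cc_pll0_out_even feeding the CAL HM RCGs */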
+static struct clk_fixed_factor npu_cc_crc_div = {
+	.mult = 1,
+	.div = 2,
+	.hw.init = &(struct clk_init_data){
+		.name = "npu_cc_crc_div",
+		.parent_names = (const char *[]){ "npu_cc_pll0_out_even" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_npu_cc_cal_hm0_clk_src[] = {
+	F(200000000, P_NPU_CC_CRC_DIV, 1, 0, 0),
+	F(300000000, P_NPU_CC_CRC_DIV, 1, 0, 0),
+	F(466000000, P_NPU_CC_CRC_DIV, 1, 0, 0),
+	F(533000000, P_NPU_CC_CRC_DIV, 1, 0, 0),
+	F(850000000, P_NPU_CC_CRC_DIV, 1, 0, 0),
+	F(1000000000, P_NPU_CC_CRC_DIV, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 npu_cc_cal_hm1_clk_src = {
+	.cmd_rcgr = 0x1140,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = npu_cc_parent_map_0_crc,
+	.freq_tbl = ftbl_npu_cc_cal_hm0_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "npu_cc_cal_hm1_clk_src",
+		.parent_names = npu_cc_parent_names_0_crc,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_MIN] = 200000000,
+			[VDD_LOWER] = 300000000,
+			[VDD_LOW] = 466000000,
+			[VDD_LOW_L1] = 533000000,
+			[VDD_NOMINAL] = 850000000,
+			[VDD_HIGH] = 1000000000},
+	},
+};
+
+static struct clk_rcg2 npu_cc_cal_hm0_clk_src = {
+	.cmd_rcgr = 0x1100,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = npu_cc_parent_map_0_crc,
+	.freq_tbl = ftbl_npu_cc_cal_hm0_clk_src,
+	.enable_safe_config = true,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_cal_hm0_clk_src",
+			.parent_names = npu_cc_parent_names_0_crc,
+			.num_parents = 6,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_rcg2_dependent_ops,
+			.vdd_class = &vdd_cx,
+			.num_rate_max = VDD_NUM,
+			.rate_max = (unsigned long[VDD_NUM]) {
+				[VDD_MIN] = 200000000,
+				[VDD_LOWER] = 300000000,
+				[VDD_LOW] = 466000000,
+				[VDD_LOW_L1] = 533000000,
+				[VDD_NOMINAL] = 850000000,
+				[VDD_HIGH] = 1000000000},
+		},
+		/*
+		 * npu_cc_cal_hm0_clk_src and npu_cc_cal_hm1_clk_src must be
+		 * configured in lockstep because they are sourced from the
+		 * same PLL, which is slewed.
+		 */
+		.dependent_hw = &npu_cc_cal_hm1_clk_src.clkr.hw,
+	},
+};
+
+static const struct freq_tbl ftbl_npu_cc_core_clk_src[] = {
+	F(60000000, P_GCC_NPU_GPLL0_DIV_CLK, 5, 0, 0),
+	F(100000000, P_GCC_NPU_GPLL0_DIV_CLK, 3, 0, 0),
+	F(200000000, P_GCC_NPU_GPLL0_CLK, 3, 0, 0),
+	F(333333333, P_NPU_CC_PLL1_OUT_EVEN, 4.5, 0, 0),
+	F(428571429, P_NPU_CC_PLL1_OUT_EVEN, 3.5, 0, 0),
+	F(500000000, P_NPU_CC_PLL1_OUT_EVEN, 3, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 npu_cc_core_clk_src = {
+	.cmd_rcgr = 0x1010,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = npu_cc_parent_map_0,
+	.freq_tbl = ftbl_npu_cc_core_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "npu_cc_core_clk_src",
+		.parent_names = npu_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_MIN] = 60000000,
+			[VDD_LOWER] = 100000000,
+			[VDD_LOW] = 200000000,
+			[VDD_LOW_L1] = 333333333,
+			[VDD_NOMINAL] = 428571429,
+			[VDD_HIGH] = 500000000},
+	},
+};
+
+static const struct freq_tbl ftbl_npu_cc_lmh_clk_src[] = {
+	F(60000000, P_GCC_NPU_GPLL0_DIV_CLK, 5, 0, 0),
+	F(100000000, P_GCC_NPU_GPLL0_DIV_CLK, 3, 0, 0),
+	F(200000000, P_GCC_NPU_GPLL0_CLK, 3, 0, 0),
+	F(214285714, P_NPU_CC_PLL1_OUT_EVEN, 7, 0, 0),
+	F(300000000, P_NPU_CC_PLL1_OUT_EVEN, 5, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 npu_cc_lmh_clk_src = {
+	.cmd_rcgr = 0x1060,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = npu_cc_parent_map_0,
+	.freq_tbl = ftbl_npu_cc_lmh_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "npu_cc_lmh_clk_src",
+		.parent_names = npu_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_MIN] = 60000000,
+			[VDD_LOWER] = 100000000,
+			[VDD_LOW] = 200000000,
+			[VDD_LOW_L1] = 214285714,
+			[VDD_NOMINAL] = 300000000},
+	},
+};
+
+static const struct freq_tbl ftbl_npu_cc_xo_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 npu_cc_xo_clk_src = {
+	.cmd_rcgr = 0x1400,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = npu_cc_parent_map_1,
+	.freq_tbl = ftbl_npu_cc_xo_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "npu_cc_xo_clk_src",
+		.parent_names = npu_cc_parent_names_1_ao,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_npu_dsp_core_clk_src[] = {
+	F(250000000, P_NPU_Q6SS_PLL_OUT_MAIN, 1, 0, 0),
+	F(300000000, P_NPU_Q6SS_PLL_OUT_MAIN, 1, 0, 0),
+	F(400000000, P_NPU_Q6SS_PLL_OUT_MAIN, 1, 0, 0),
+	F(500000000, P_NPU_Q6SS_PLL_OUT_MAIN, 1, 0, 0),
+	F(660000000, P_NPU_Q6SS_PLL_OUT_MAIN, 1, 0, 0),
+	F(800000000, P_NPU_Q6SS_PLL_OUT_MAIN, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 npu_dsp_core_clk_src = {
+	.cmd_rcgr = 0x28,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = npu_cc_parent_map_2,
+	.freq_tbl = ftbl_npu_dsp_core_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "npu_dsp_core_clk_src",
+		.parent_names = npu_cc_parent_names_2,
+		.num_parents = 3,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_MIN] = 250000000,
+			[VDD_LOWER] = 300000000,
+			[VDD_LOW] = 400000000,
+			[VDD_LOW_L1] = 500000000,
+			[VDD_NOMINAL] = 660000000,
+			[VDD_HIGH] = 800000000},
+	},
+};
+
+static struct clk_branch npu_cc_atb_clk = {
+	.halt_reg = 0x10d0,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x10d0,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_atb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_bto_core_clk = {
+	.halt_reg = 0x10dc,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x10dc,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_bto_core_clk",
+			.parent_names = (const char *[]){
+				"npu_cc_xo_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_bwmon_clk = {
+	.halt_reg = 0x10d8,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x10d8,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_bwmon_clk",
+			.parent_names = (const char *[]){
+				"npu_cc_xo_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_cal_hm0_cdc_clk = {
+	.halt_reg = 0x1098,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1098,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_cal_hm0_cdc_clk",
+			.parent_names = (const char *[]){
+				"npu_cc_cal_hm0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_cal_hm0_clk = {
+	.halt_reg = 0x1110,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1110,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_cal_hm0_clk",
+			.parent_names = (const char *[]){
+				"npu_cc_cal_hm0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_cal_hm0_dpm_ip_clk = {
+	.halt_reg = 0x109c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x109c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_cal_hm0_dpm_ip_clk",
+			.parent_names = (const char *[]){
+				"npu_cc_cal_hm0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_cal_hm0_perf_cnt_clk = {
+	.halt_reg = 0x10a0,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x10a0,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_cal_hm0_perf_cnt_clk",
+			.parent_names = (const char *[]){
+				"npu_cc_cal_hm0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_cal_hm1_cdc_clk = {
+	.halt_reg = 0x10a4,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x10a4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_cal_hm1_cdc_clk",
+			.parent_names = (const char *[]){
+				"npu_cc_cal_hm0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_cal_hm1_clk = {
+	.halt_reg = 0x1150,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1150,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_cal_hm1_clk",
+			.parent_names = (const char *[]){
+				"npu_cc_cal_hm0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_cal_hm1_dpm_ip_clk = {
+	.halt_reg = 0x10a8,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x10a8,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_cal_hm1_dpm_ip_clk",
+			.parent_names = (const char *[]){
+				"npu_cc_cal_hm0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_cal_hm1_perf_cnt_clk = {
+	.halt_reg = 0x10ac,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x10ac,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_cal_hm1_perf_cnt_clk",
+			.parent_names = (const char *[]){
+				"npu_cc_cal_hm0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_core_clk = {
+	.halt_reg = 0x1030,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1030,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_core_clk",
+			.parent_names = (const char *[]){
+				"npu_cc_core_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_dl_dpm_clk = {
+	.halt_reg = 0x1238,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1238,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_dl_dpm_clk",
+			.parent_names = (const char *[]){
+				"npu_cc_lmh_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_dl_llm_clk = {
+	.halt_reg = 0x1234,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1234,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_dl_llm_clk",
+			.parent_names = (const char *[]){
+				"npu_cc_lmh_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_dpm_clk = {
+	.halt_reg = 0x107c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x107c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_dpm_clk",
+			.parent_names = (const char *[]){
+				"npu_cc_lmh_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_dpm_temp_clk = {
+	.halt_reg = 0x10c4,
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x10c4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_dpm_temp_clk",
+			.parent_names = (const char *[]){ "npu_llm_temp_clk" },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_dpm_xo_clk = {
+	.halt_reg = 0x1094,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1094,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_dpm_xo_clk",
+			.parent_names = (const char *[]){
+				"npu_cc_xo_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_dsp_ahbm_clk = {
+	.halt_reg = 0x1214,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1214,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_dsp_ahbm_clk",
+			.parent_names = (const char *[]){
+				"npu_cc_core_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_dsp_ahbs_clk = {
+	.halt_reg = 0x1210,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x1210,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_dsp_ahbs_clk",
+			.parent_names = (const char *[]){
+				"npu_cc_core_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_dsp_axi_clk = {
+	.halt_reg = 0x121c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x121c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_dsp_axi_clk",
+			.parent_names = (const char *[]){
+				"gcc_npu_noc_axi_clk"
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_dsp_bwmon_ahb_clk = {
+	.halt_reg = 0x1218,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1218,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_dsp_bwmon_ahb_clk",
+			.parent_names = (const char *[]){
+				"npu_cc_core_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_dsp_bwmon_clk = {
+	.halt_reg = 0x1224,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1224,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_dsp_bwmon_clk",
+			.parent_names = (const char *[]){
+				"npu_cc_xo_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_isense_clk = {
+	.halt_reg = 0x1078,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1078,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_isense_clk",
+			.parent_names = (const char *[]){
+				"npu_cc_lmh_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_llm_clk = {
+	.halt_reg = 0x1074,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1074,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_llm_clk",
+			.parent_names = (const char *[]){
+				"npu_cc_lmh_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_llm_curr_clk = {
+	.halt_reg = 0x10d4,
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x10d4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_llm_curr_clk",
+			.parent_names = (const char *[]){ "npu_llm_curr_clk" },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_llm_temp_clk = {
+	.halt_reg = 0x10c8,
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x10c8,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_llm_temp_clk",
+			.parent_names = (const char *[]){ "npu_llm_temp_clk" },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_llm_xo_clk = {
+	.halt_reg = 0x1090,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1090,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_llm_xo_clk",
+			.parent_names = (const char *[]){
+				"npu_cc_xo_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_noc_ahb_clk = {
+	.halt_reg = 0x10c0,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x10c0,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_noc_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_noc_axi_clk = {
+	.halt_reg = 0x10b8,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x10b8,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_noc_axi_clk",
+			.parent_names = (const char *[]){
+				"gcc_npu_noc_axi_clk"
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_noc_dma_clk = {
+	.halt_reg = 0x10b0,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x10b0,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_noc_dma_clk",
+			.parent_names = (const char *[]){
+				"gcc_npu_noc_dma_clk"
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_rsc_xo_clk = {
+	.halt_reg = 0x10e0,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x10e0,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_rsc_xo_clk",
+			.parent_names = (const char *[]){
+				"npu_cc_xo_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_s2p_clk = {
+	.halt_reg = 0x10cc,
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x10cc,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_s2p_clk",
+			.parent_names = (const char *[]){ "npu_s2p_clk" },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_xo_clk = {
+	.halt_reg = 0x1410,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1410,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_xo_clk",
+			.parent_names = (const char *[]){
+				"npu_cc_xo_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_regmap *npu_cc_kona_clocks[] = {
+	[NPU_CC_ATB_CLK] = &npu_cc_atb_clk.clkr,
+	[NPU_CC_BTO_CORE_CLK] = &npu_cc_bto_core_clk.clkr,
+	[NPU_CC_BWMON_CLK] = &npu_cc_bwmon_clk.clkr,
+	[NPU_CC_CAL_HM0_CDC_CLK] = &npu_cc_cal_hm0_cdc_clk.clkr,
+	[NPU_CC_CAL_HM0_CLK] = &npu_cc_cal_hm0_clk.clkr,
+	[NPU_CC_CAL_HM0_CLK_SRC] = &npu_cc_cal_hm0_clk_src.clkr,
+	[NPU_CC_CAL_HM0_DPM_IP_CLK] = &npu_cc_cal_hm0_dpm_ip_clk.clkr,
+	[NPU_CC_CAL_HM0_PERF_CNT_CLK] = &npu_cc_cal_hm0_perf_cnt_clk.clkr,
+	[NPU_CC_CAL_HM1_CDC_CLK] = &npu_cc_cal_hm1_cdc_clk.clkr,
+	[NPU_CC_CAL_HM1_CLK] = &npu_cc_cal_hm1_clk.clkr,
+	[NPU_CC_CAL_HM1_CLK_SRC] = &npu_cc_cal_hm1_clk_src.clkr,
+	[NPU_CC_CAL_HM1_DPM_IP_CLK] = &npu_cc_cal_hm1_dpm_ip_clk.clkr,
+	[NPU_CC_CAL_HM1_PERF_CNT_CLK] = &npu_cc_cal_hm1_perf_cnt_clk.clkr,
+	[NPU_CC_CORE_CLK] = &npu_cc_core_clk.clkr,
+	[NPU_CC_CORE_CLK_SRC] = &npu_cc_core_clk_src.clkr,
+	[NPU_CC_DL_DPM_CLK] = &npu_cc_dl_dpm_clk.clkr,
+	[NPU_CC_DL_LLM_CLK] = &npu_cc_dl_llm_clk.clkr,
+	[NPU_CC_DPM_CLK] = &npu_cc_dpm_clk.clkr,
+	[NPU_CC_DPM_TEMP_CLK] = &npu_cc_dpm_temp_clk.clkr,
+	[NPU_CC_DPM_XO_CLK] = &npu_cc_dpm_xo_clk.clkr,
+	[NPU_CC_DSP_AHBM_CLK] = &npu_cc_dsp_ahbm_clk.clkr,
+	[NPU_CC_DSP_AHBS_CLK] = &npu_cc_dsp_ahbs_clk.clkr,
+	[NPU_CC_DSP_AXI_CLK] = &npu_cc_dsp_axi_clk.clkr,
+	[NPU_CC_DSP_BWMON_AHB_CLK] = &npu_cc_dsp_bwmon_ahb_clk.clkr,
+	[NPU_CC_DSP_BWMON_CLK] = &npu_cc_dsp_bwmon_clk.clkr,
+	[NPU_CC_ISENSE_CLK] = &npu_cc_isense_clk.clkr,
+	[NPU_CC_LLM_CLK] = &npu_cc_llm_clk.clkr,
+	[NPU_CC_LLM_CURR_CLK] = &npu_cc_llm_curr_clk.clkr,
+	[NPU_CC_LLM_TEMP_CLK] = &npu_cc_llm_temp_clk.clkr,
+	[NPU_CC_LLM_XO_CLK] = &npu_cc_llm_xo_clk.clkr,
+	[NPU_CC_LMH_CLK_SRC] = &npu_cc_lmh_clk_src.clkr,
+	[NPU_CC_NOC_AHB_CLK] = &npu_cc_noc_ahb_clk.clkr,
+	[NPU_CC_NOC_AXI_CLK] = &npu_cc_noc_axi_clk.clkr,
+	[NPU_CC_NOC_DMA_CLK] = &npu_cc_noc_dma_clk.clkr,
+	[NPU_CC_PLL0] = &npu_cc_pll0.clkr,
+	[NPU_CC_PLL0_OUT_EVEN] = &npu_cc_pll0_out_even.clkr,
+	[NPU_CC_PLL1] = &npu_cc_pll1.clkr,
+	[NPU_CC_PLL1_OUT_EVEN] = &npu_cc_pll1_out_even.clkr,
+	[NPU_CC_RSC_XO_CLK] = &npu_cc_rsc_xo_clk.clkr,
+	[NPU_CC_S2P_CLK] = &npu_cc_s2p_clk.clkr,
+	[NPU_CC_XO_CLK] = &npu_cc_xo_clk.clkr,
+	[NPU_CC_XO_CLK_SRC] = &npu_cc_xo_clk_src.clkr,
+};
+
+static struct clk_regmap *npu_qdsp6ss_kona_clocks[] = {
+	[NPU_DSP_CORE_CLK_SRC] = &npu_dsp_core_clk_src.clkr,
+};
+
+static struct clk_regmap *npu_qdsp6ss_pll_kona_clocks[] = {
+	[NPU_Q6SS_PLL] = &npu_q6ss_pll.clkr,
+};
+
+static const struct qcom_reset_map npu_cc_kona_resets[] = {
+	[NPU_CC_CAL_HM0_BCR] = { 0x10f0 },
+	[NPU_CC_CAL_HM1_BCR] = { 0x1130 },
+	[NPU_CC_CORE_BCR] = { 0x1000 },
+	[NPU_CC_DPM_TEMP_CLK_ARES] = { 0x10c4, BIT(2) },
+	[NPU_CC_DSP_BCR] = { 0x1200 },
+	[NPU_CC_LLM_CURR_CLK_ARES] = { 0x10d4, BIT(2) },
+	[NPU_CC_LLM_TEMP_CLK_ARES] = { 0x10c8, BIT(2) },
+};
+
+static const struct regmap_config npu_cc_kona_regmap_config = {
+	.name = "cc",
+	.reg_bits = 32,
+	.reg_stride = 4,
+	.val_bits = 32,
+	.max_register = 0xa060,
+	.fast_io = true,
+};
+
+static const struct regmap_config npu_qdsp6ss_kona_regmap_config = {
+	.name = "qdsp6ss",
+	.reg_bits = 32,
+	.reg_stride = 4,
+	.val_bits = 32,
+	.max_register = 0x203c,
+	.fast_io = true,
+};
+
+static const struct regmap_config npu_qdsp6ss_pll_kona_regmap_config = {
+	.name = "qdsp6ss_pll",
+	.reg_bits = 32,
+	.reg_stride = 4,
+	.val_bits = 32,
+	.max_register = 0x50,
+	.fast_io = true,
+};
+
+static const struct qcom_cc_desc npu_cc_kona_desc = {
+	.config = &npu_cc_kona_regmap_config,
+	.clks = npu_cc_kona_clocks,
+	.num_clks = ARRAY_SIZE(npu_cc_kona_clocks),
+	.resets = npu_cc_kona_resets,
+	.num_resets = ARRAY_SIZE(npu_cc_kona_resets),
+};
+
+static const struct qcom_cc_desc npu_qdsp6ss_kona_desc = {
+	.config = &npu_qdsp6ss_kona_regmap_config,
+	.clks = npu_qdsp6ss_kona_clocks,
+	.num_clks = ARRAY_SIZE(npu_qdsp6ss_kona_clocks),
+};
+
+static const struct qcom_cc_desc npu_qdsp6ss_pll_kona_desc = {
+	.config = &npu_qdsp6ss_pll_kona_regmap_config,
+	.clks = npu_qdsp6ss_pll_kona_clocks,
+	.num_clks = ARRAY_SIZE(npu_qdsp6ss_pll_kona_clocks),
+};
+
+static const struct of_device_id npu_cc_kona_match_table[] = {
+	{ .compatible = "qcom,npucc-kona" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, npu_cc_kona_match_table);
+
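+/*
+ * Write the CRC MND configuration and SID FSM control settings for both
+ * CAL HM0 and HM1. Called once at probe time for the "cc" block, before its
+ * clocks are registered.
+ */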
+static void enable_npu_crc(struct regmap *regmap)
+{
+	regmap_write(regmap, HM0_CRC_MND_CFG, CRC_MND_CFG_SETTING);
+	regmap_write(regmap, HM0_CRC_SID_FSM_CTRL, CRC_SID_FSM_CTRL_SETTING);
+	regmap_write(regmap, HM1_CRC_MND_CFG, CRC_MND_CFG_SETTING);
+	regmap_write(regmap, HM1_CRC_SID_FSM_CTRL, CRC_SID_FSM_CTRL_SETTING);
+}
+
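+/*
+ * Map the register region named after the regmap config ("cc", "qdsp6ss" or
+ * "qdsp6ss_pll"), build a regmap for it, apply any block-specific PLL/CRC
+ * setup, then register that block's clocks (and resets, if any).
+ */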
+static int npu_clocks_kona_probe(struct platform_device *pdev,
+				 const struct qcom_cc_desc *desc)
+{
+	struct regmap *regmap;
+	struct resource *res;
+	void __iomem *base;
+	int ret;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+					   desc->config->name);
+	base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	regmap = devm_regmap_init_mmio(&pdev->dev, base, desc->config);
+	if (IS_ERR(regmap))
+		return PTR_ERR(regmap);
+
+	if (!strcmp("cc", desc->config->name)) {
+		clk_lucid_pll_configure(&npu_cc_pll0, regmap,
+					&npu_cc_pll0_config);
+		clk_lucid_pll_configure(&npu_cc_pll1, regmap,
+					&npu_cc_pll1_config);
+		enable_npu_crc(regmap);
+
+		/* Register the fixed factor clock for CRC divider */
+		ret = devm_clk_hw_register(&pdev->dev, &npu_cc_crc_div.hw);
+		if (ret) {
+			dev_err(&pdev->dev, "Failed to register CRC divider clock, ret=%d\n",
+				ret);
+			return ret;
+		}
+	} else if (!strcmp("qdsp6ss_pll", desc->config->name)) {
+		clk_lucid_pll_configure(&npu_q6ss_pll, regmap,
+					&npu_q6ss_pll_config);
+	}
+
+	return qcom_cc_really_probe(pdev, desc, regmap);
+}
+
+static int npu_cc_kona_probe(struct platform_device *pdev)
+{
+	int ret;
+
+	vdd_cx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_cx");
+	if (IS_ERR(vdd_cx.regulator[0])) {
+		ret = PTR_ERR(vdd_cx.regulator[0]);
+		if (ret != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get vdd_cx regulator, ret=%d\n",
+				ret);
+		return ret;
+	}
+
+	ret = npu_clocks_kona_probe(pdev, &npu_cc_kona_desc);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "npu_cc clock registration failed, ret=%d\n",
+			ret);
+		return ret;
+	}
+
+	ret = npu_clocks_kona_probe(pdev, &npu_qdsp6ss_kona_desc);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "npu_qdsp6ss clock registration failed, ret=%d\n",
+			ret);
+		return ret;
+	}
+
+	ret = npu_clocks_kona_probe(pdev, &npu_qdsp6ss_pll_kona_desc);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "npu_qdsp6ss_pll clock registration failed, ret=%d\n",
+			ret);
+		return ret;
+	}
+
+	dev_info(&pdev->dev, "Registered NPU_CC clocks\n");
+
+	return 0;
+}
+
+static struct platform_driver npu_cc_kona_driver = {
+	.probe = npu_cc_kona_probe,
+	.driver = {
+		.name = "npu_cc-kona",
+		.of_match_table = npu_cc_kona_match_table,
+	},
+};
+
+static int __init npu_cc_kona_init(void)
+{
+	return platform_driver_register(&npu_cc_kona_driver);
+}
+subsys_initcall(npu_cc_kona_init);
+
+static void __exit npu_cc_kona_exit(void)
+{
+	platform_driver_unregister(&npu_cc_kona_driver);
+}
+module_exit(npu_cc_kona_exit);
+
+MODULE_DESCRIPTION("QTI NPU_CC KONA Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:npu_cc-kona");
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 3242af0..8cdc981 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -476,6 +476,14 @@
 config SRAM_EXEC
 	bool
 
+config QSEECOM
+	tristate "QTI Secure Execution Communicator driver"
+	help
+	  Provides a communication interface between userspace and the
+	  QTI Secure Execution Environment (QSEE) using the Secure Channel
+	  Manager (SCM) interface. It exposes APIs for both userspace and
+	  kernel clients.
+
 config VEXPRESS_SYSCFG
 	bool "Versatile Express System Configuration driver"
 	depends on VEXPRESS_CONFIG
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index f4d0fd9..883197b 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -50,6 +50,7 @@
 obj-$(CONFIG_SRAM_EXEC)		+= sram-exec.o
 obj-y				+= mic/
 obj-$(CONFIG_GENWQE)		+= genwqe/
+obj-$(CONFIG_QSEECOM)		+= qseecom.o
 obj-$(CONFIG_ECHO)		+= echo/
 obj-$(CONFIG_VEXPRESS_SYSCFG)	+= vexpress-syscfg.o
 obj-$(CONFIG_CXL_BASE)		+= cxl/
diff --git a/drivers/misc/compat_qseecom.c b/drivers/misc/compat_qseecom.c
new file mode 100644
index 0000000..58794e4
--- /dev/null
+++ b/drivers/misc/compat_qseecom.c
@@ -0,0 +1,914 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/qseecom.h>
+#include <linux/compat.h>
+#include "compat_qseecom.h"
+
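+/*
+ * Each compat_get_*() helper copies a 32-bit userspace request structure into
+ * its native counterpart field by field. Pointer fields are zeroed first and
+ * then overwritten with the compat pointer so that the upper bits of the
+ * native pointer are well defined.
+ */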
+static int compat_get_qseecom_register_listener_req(
+		struct compat_qseecom_register_listener_req __user *data32,
+		struct qseecom_register_listener_req __user *data)
+{
+	int err;
+	compat_ulong_t listener_id;
+	compat_long_t ifd_data_fd;
+	compat_uptr_t virt_sb_base;
+	compat_ulong_t sb_size;
+
+	err = get_user(listener_id, &data32->listener_id);
+	err |= put_user(listener_id, &data->listener_id);
+	err |= get_user(ifd_data_fd, &data32->ifd_data_fd);
+	err |= put_user(ifd_data_fd, &data->ifd_data_fd);
+
+	err |= get_user(virt_sb_base, &data32->virt_sb_base);
+	/*
+	 * Zero the whole native pointer first, since the compat store below
+	 * only writes the low 32 bits.
+	 */
+	err |= put_user(NULL, &data->virt_sb_base);
+	err |= put_user(virt_sb_base, (compat_uptr_t *)&data->virt_sb_base);
+
+	err |= get_user(sb_size, &data32->sb_size);
+	err |= put_user(sb_size, &data->sb_size);
+	return err;
+}
+
+static int compat_get_qseecom_load_img_req(
+		struct compat_qseecom_load_img_req __user *data32,
+		struct qseecom_load_img_req __user *data)
+{
+	int err;
+	compat_ulong_t mdt_len;
+	compat_ulong_t img_len;
+	compat_long_t ifd_data_fd;
+	compat_ulong_t app_arch;
+	compat_uint_t app_id;
+
+	err = get_user(mdt_len, &data32->mdt_len);
+	err |= put_user(mdt_len, &data->mdt_len);
+	err |= get_user(img_len, &data32->img_len);
+	err |= put_user(img_len, &data->img_len);
+	err |= get_user(ifd_data_fd, &data32->ifd_data_fd);
+	err |= put_user(ifd_data_fd, &data->ifd_data_fd);
+	err |= copy_in_user(data->img_name, data32->img_name,
+				MAX_APP_NAME_SIZE);
+	err |= get_user(app_arch, &data32->app_arch);
+	err |= put_user(app_arch, &data->app_arch);
+	err |= get_user(app_id, &data32->app_id);
+	err |= put_user(app_id, &data->app_id);
+	return err;
+}
+
+static int compat_get_qseecom_send_cmd_req(
+		struct compat_qseecom_send_cmd_req __user *data32,
+		struct qseecom_send_cmd_req __user *data)
+{
+	int err;
+	compat_uptr_t cmd_req_buf;
+	compat_uint_t cmd_req_len;
+	compat_uptr_t resp_buf;
+	compat_uint_t resp_len;
+
+	err = get_user(cmd_req_buf, &data32->cmd_req_buf);
+	err |= put_user(NULL, &data->cmd_req_buf);
+	err |= put_user(cmd_req_buf, (compat_uptr_t *)&data->cmd_req_buf);
+	err |= get_user(cmd_req_len, &data32->cmd_req_len);
+	err |= put_user(cmd_req_len, &data->cmd_req_len);
+
+	err |= get_user(resp_buf, &data32->resp_buf);
+	err |= put_user(NULL, &data->resp_buf);
+	err |= put_user(resp_buf, (compat_uptr_t *)&data->resp_buf);
+	err |= get_user(resp_len, &data32->resp_len);
+	err |= put_user(resp_len, &data->resp_len);
+	return err;
+}
+
+static int compat_get_qseecom_send_modfd_cmd_req(
+		struct compat_qseecom_send_modfd_cmd_req __user *data32,
+		struct qseecom_send_modfd_cmd_req __user *data)
+{
+	int err;
+	unsigned int i;
+	compat_uptr_t cmd_req_buf;
+	compat_uint_t cmd_req_len;
+	compat_uptr_t resp_buf;
+	compat_uint_t resp_len;
+	compat_long_t fd;
+	compat_ulong_t cmd_buf_offset;
+
+	err = get_user(cmd_req_buf, &data32->cmd_req_buf);
+	err |= put_user(NULL, &data->cmd_req_buf);
+	err |= put_user(cmd_req_buf, (compat_uptr_t *)&data->cmd_req_buf);
+	err |= get_user(cmd_req_len, &data32->cmd_req_len);
+	err |= put_user(cmd_req_len, &data->cmd_req_len);
+	err |= get_user(resp_buf, &data32->resp_buf);
+	err |= put_user(NULL, &data->resp_buf);
+	err |= put_user(resp_buf, (compat_uptr_t *)&data->resp_buf);
+	err |= get_user(resp_len, &data32->resp_len);
+	err |= put_user(resp_len, &data->resp_len);
+	for (i = 0; i < MAX_ION_FD; i++) {
+		err |= get_user(fd, &data32->ifd_data[i].fd);
+		err |= put_user(fd, &data->ifd_data[i].fd);
+		err |= get_user(cmd_buf_offset,
+				&data32->ifd_data[i].cmd_buf_offset);
+		err |= put_user(cmd_buf_offset,
+				&data->ifd_data[i].cmd_buf_offset);
+	}
+	return err;
+}
+
+static int compat_get_qseecom_set_sb_mem_param_req(
+		struct compat_qseecom_set_sb_mem_param_req __user *data32,
+		struct qseecom_set_sb_mem_param_req __user *data)
+{
+	int err;
+	compat_long_t ifd_data_fd;
+	compat_uptr_t virt_sb_base;
+	compat_ulong_t sb_len;
+
+	err = get_user(ifd_data_fd, &data32->ifd_data_fd);
+	err |= put_user(ifd_data_fd, &data->ifd_data_fd);
+	err |= get_user(virt_sb_base, &data32->virt_sb_base);
+	err |= put_user(NULL, &data->virt_sb_base);
+	err |= put_user(virt_sb_base, (compat_uptr_t *)&data->virt_sb_base);
+	err |= get_user(sb_len, &data32->sb_len);
+	err |= put_user(sb_len, &data->sb_len);
+	return err;
+}
+
+static int compat_get_qseecom_qseos_version_req(
+		struct compat_qseecom_qseos_version_req __user *data32,
+		struct qseecom_qseos_version_req __user *data)
+{
+	int err;
+	compat_uint_t qseos_version;
+
+	err = get_user(qseos_version, &data32->qseos_version);
+	err |= put_user(qseos_version, &data->qseos_version);
+	return err;
+}
+
+static int compat_get_qseecom_qseos_app_load_query(
+		struct compat_qseecom_qseos_app_load_query __user *data32,
+		struct qseecom_qseos_app_load_query __user *data)
+{
+	int err = 0;
+	unsigned int i;
+	compat_uint_t app_id;
+	char app_name;
+	compat_ulong_t app_arch;
+
+	for (i = 0; i < MAX_APP_NAME_SIZE; i++) {
+		err |= get_user(app_name, &(data32->app_name[i]));
+		err |= put_user(app_name, &(data->app_name[i]));
+	}
+	err |= get_user(app_id, &data32->app_id);
+	err |= put_user(app_id, &data->app_id);
+	err |= get_user(app_arch, &data32->app_arch);
+	err |= put_user(app_arch, &data->app_arch);
+	return err;
+}
+
+static int compat_get_qseecom_send_svc_cmd_req(
+		struct compat_qseecom_send_svc_cmd_req __user *data32,
+		struct qseecom_send_svc_cmd_req __user *data)
+{
+	int err;
+	compat_ulong_t cmd_id;
+	compat_uptr_t cmd_req_buf;
+	compat_uint_t cmd_req_len;
+	compat_uptr_t resp_buf;
+	compat_uint_t resp_len;
+
+	err = get_user(cmd_id, &data32->cmd_id);
+	err |= put_user(cmd_id, &data->cmd_id);
+	err |= get_user(cmd_req_buf, &data32->cmd_req_buf);
+	err |= put_user(NULL, &data->cmd_req_buf);
+	err |= put_user(cmd_req_buf, (compat_uptr_t *)&data->cmd_req_buf);
+	err |= get_user(cmd_req_len, &data32->cmd_req_len);
+	err |= put_user(cmd_req_len, &data->cmd_req_len);
+	err |= get_user(resp_buf, &data32->resp_buf);
+	err |= put_user(NULL, &data->resp_buf);
+	err |= put_user(resp_buf, (compat_uptr_t *)&data->resp_buf);
+	err |= get_user(resp_len, &data32->resp_len);
+	err |= put_user(resp_len, &data->resp_len);
+	return err;
+}
+
+static int compat_get_qseecom_create_key_req(
+		struct compat_qseecom_create_key_req __user *data32,
+		struct qseecom_create_key_req __user *data)
+{
+	int err;
+	compat_uint_t usage;
+
+	err = copy_in_user(data->hash32, data32->hash32, QSEECOM_HASH_SIZE);
+	err |= get_user(usage, &data32->usage);
+	err |= put_user(usage, &data->usage);
+
+	return err;
+}
+
+static int compat_get_qseecom_wipe_key_req(
+		struct compat_qseecom_wipe_key_req __user *data32,
+		struct qseecom_wipe_key_req __user *data)
+{
+	int err;
+	compat_uint_t usage;
+	compat_int_t wipe_key_flag;
+
+	err = get_user(usage, &data32->usage);
+	err |= put_user(usage, &data->usage);
+	err |= get_user(wipe_key_flag, &data32->wipe_key_flag);
+	err |= put_user(wipe_key_flag, &data->wipe_key_flag);
+
+	return err;
+}
+
+static int compat_get_qseecom_update_key_userinfo_req(
+		struct compat_qseecom_update_key_userinfo_req __user *data32,
+		struct qseecom_update_key_userinfo_req __user *data)
+{
+	int err = 0;
+	compat_uint_t usage;
+
+	err = copy_in_user(data->current_hash32, data32->current_hash32,
+				QSEECOM_HASH_SIZE);
+	err |= copy_in_user(data->new_hash32, data32->new_hash32,
+				QSEECOM_HASH_SIZE);
+	err |= get_user(usage, &data32->usage);
+	err |= put_user(usage, &data->usage);
+
+	return err;
+}
+
+static int compat_get_qseecom_save_partition_hash_req(
+		struct compat_qseecom_save_partition_hash_req __user *data32,
+		struct qseecom_save_partition_hash_req __user *data)
+{
+	int err;
+	compat_int_t partition_id;
+
+	err = get_user(partition_id, &data32->partition_id);
+	err |= put_user(partition_id, &data->partition_id);
+	err |= copy_in_user(data->digest, data32->digest,
+				SHA256_DIGEST_LENGTH);
+	return err;
+}
+
+static int compat_get_qseecom_is_es_activated_req(
+		struct compat_qseecom_is_es_activated_req __user *data32,
+		struct qseecom_is_es_activated_req __user *data)
+{
+	compat_int_t is_activated;
+	int err;
+
+	err = get_user(is_activated, &data32->is_activated);
+	err |= put_user(is_activated, &data->is_activated);
+	return err;
+}
+
+static int compat_get_qseecom_mdtp_cipher_dip_req(
+		struct compat_qseecom_mdtp_cipher_dip_req __user *data32,
+		struct qseecom_mdtp_cipher_dip_req __user *data)
+{
+	int err;
+	compat_int_t in_buf_size;
+	compat_uptr_t in_buf;
+	compat_int_t out_buf_size;
+	compat_uptr_t out_buf;
+	compat_int_t direction;
+
+	err = get_user(in_buf_size, &data32->in_buf_size);
+	err |= put_user(in_buf_size, &data->in_buf_size);
+	err |= get_user(out_buf_size, &data32->out_buf_size);
+	err |= put_user(out_buf_size, &data->out_buf_size);
+	err |= get_user(direction, &data32->direction);
+	err |= put_user(direction, &data->direction);
+	err |= get_user(in_buf, &data32->in_buf);
+	err |= put_user(NULL, &data->in_buf);
+	err |= put_user(in_buf, (compat_uptr_t *)&data->in_buf);
+	err |= get_user(out_buf, &data32->out_buf);
+	err |= put_user(NULL, &data->out_buf);
+	err |= put_user(out_buf, (compat_uptr_t *)&data->out_buf);
+
+	return err;
+}
+
+static int compat_get_qseecom_send_modfd_listener_resp(
+		struct compat_qseecom_send_modfd_listener_resp __user *data32,
+		struct qseecom_send_modfd_listener_resp __user *data)
+{
+	int err;
+	unsigned int i;
+	compat_uptr_t resp_buf_ptr;
+	compat_uint_t resp_len;
+	compat_long_t fd;
+	compat_ulong_t cmd_buf_offset;
+
+	err = get_user(resp_buf_ptr, &data32->resp_buf_ptr);
+	err |= put_user(NULL, &data->resp_buf_ptr);
+	err |= put_user(resp_buf_ptr, (compat_uptr_t *)&data->resp_buf_ptr);
+	err |= get_user(resp_len, &data32->resp_len);
+	err |= put_user(resp_len, &data->resp_len);
+
+	for (i = 0; i < MAX_ION_FD; i++) {
+		err |= get_user(fd, &data32->ifd_data[i].fd);
+		err |= put_user(fd, &data->ifd_data[i].fd);
+		err |= get_user(cmd_buf_offset,
+				&data32->ifd_data[i].cmd_buf_offset);
+		err |= put_user(cmd_buf_offset,
+				&data->ifd_data[i].cmd_buf_offset);
+	}
+	return err;
+}
+
+
+static int compat_get_qseecom_qteec_req(
+		struct compat_qseecom_qteec_req __user *data32,
+		struct qseecom_qteec_req __user *data)
+{
+	compat_uptr_t req_ptr;
+	compat_ulong_t req_len;
+	compat_uptr_t resp_ptr;
+	compat_ulong_t resp_len;
+	int err;
+
+	err = get_user(req_ptr, &data32->req_ptr);
+	err |= put_user(NULL, &data->req_ptr);
+	err |= put_user(req_ptr, (compat_uptr_t *)&data->req_ptr);
+	err |= get_user(req_len, &data32->req_len);
+	err |= put_user(req_len, &data->req_len);
+
+	err |= get_user(resp_ptr, &data32->resp_ptr);
+	err |= put_user(NULL, &data->resp_ptr);
+	err |= put_user(resp_ptr, (compat_uptr_t *)&data->resp_ptr);
+	err |= get_user(resp_len, &data32->resp_len);
+	err |= put_user(resp_len, &data->resp_len);
+	return err;
+}
+
+static int compat_get_qseecom_qteec_modfd_req(
+		struct compat_qseecom_qteec_modfd_req __user *data32,
+		struct qseecom_qteec_modfd_req __user *data)
+{
+	compat_uptr_t req_ptr;
+	compat_ulong_t req_len;
+	compat_uptr_t resp_ptr;
+	compat_ulong_t resp_len;
+	compat_long_t fd;
+	compat_ulong_t cmd_buf_offset;
+	int err, i;
+
+	err = get_user(req_ptr, &data32->req_ptr);
+	err |= put_user(NULL, &data->req_ptr);
+	err |= put_user(req_ptr, (compat_uptr_t *)&data->req_ptr);
+	err |= get_user(req_len, &data32->req_len);
+	err |= put_user(req_len, &data->req_len);
+
+	err |= get_user(resp_ptr, &data32->resp_ptr);
+	err |= put_user(NULL, &data->resp_ptr);
+	err |= put_user(resp_ptr, (compat_uptr_t *)&data->resp_ptr);
+	err |= get_user(resp_len, &data32->resp_len);
+	err |= put_user(resp_len, &data->resp_len);
+
+	for (i = 0; i < MAX_ION_FD; i++) {
+		err |= get_user(fd, &data32->ifd_data[i].fd);
+		err |= put_user(fd, &data->ifd_data[i].fd);
+		err |= get_user(cmd_buf_offset,
+				&data32->ifd_data[i].cmd_buf_offset);
+		err |= put_user(cmd_buf_offset,
+				&data->ifd_data[i].cmd_buf_offset);
+	}
+	return err;
+}
+
+static int compat_get_int(compat_int_t __user *data32,
+		int __user *data)
+{
+	compat_int_t x;
+	int err;
+
+	err = get_user(x, data32);
+	err |= put_user(x, data);
+	return err;
+}
+
+static int compat_put_qseecom_load_img_req(
+		struct compat_qseecom_load_img_req __user *data32,
+		struct qseecom_load_img_req __user *data)
+{
+	int err;
+	compat_ulong_t mdt_len;
+	compat_ulong_t img_len;
+	compat_long_t ifd_data_fd;
+	compat_ulong_t app_arch;
+	compat_int_t app_id;
+
+	err = get_user(mdt_len, &data->mdt_len);
+	err |= put_user(mdt_len, &data32->mdt_len);
+	err |= get_user(img_len, &data->img_len);
+	err |= put_user(img_len, &data32->img_len);
+	err |= get_user(ifd_data_fd, &data->ifd_data_fd);
+	err |= put_user(ifd_data_fd, &data32->ifd_data_fd);
+	err |= copy_in_user(data32->img_name, data->img_name,
+				MAX_APP_NAME_SIZE);
+	err |= get_user(app_arch, &data->app_arch);
+	err |= put_user(app_arch, &data32->app_arch);
+	err |= get_user(app_id, &data->app_id);
+	err |= put_user(app_id, &data32->app_id);
+	return err;
+}
+
+static int compat_put_qseecom_qseos_version_req(
+		struct compat_qseecom_qseos_version_req __user *data32,
+		struct qseecom_qseos_version_req __user *data)
+{
+	compat_uint_t qseos_version;
+	int err;
+
+	err = get_user(qseos_version, &data->qseos_version);
+	err |= put_user(qseos_version, &data32->qseos_version);
+	return err;
+}
+
+static int compat_put_qseecom_qseos_app_load_query(
+		struct compat_qseecom_qseos_app_load_query __user *data32,
+		struct qseecom_qseos_app_load_query __user *data)
+{
+	int err = 0;
+	unsigned int i;
+	compat_int_t app_id;
+	compat_ulong_t app_arch;
+	char app_name;
+
+	for (i = 0; i < MAX_APP_NAME_SIZE; i++) {
+		err |= get_user(app_name, &(data->app_name[i]));
+		err |= put_user(app_name, &(data32->app_name[i]));
+	}
+	err |= get_user(app_id, &data->app_id);
+	err |= put_user(app_id, &data32->app_id);
+	err |= get_user(app_arch, &data->app_arch);
+	err |= put_user(app_arch, &data32->app_arch);
+
+	return err;
+}
+
+static int compat_put_qseecom_is_es_activated_req(
+		struct compat_qseecom_is_es_activated_req __user *data32,
+		struct qseecom_is_es_activated_req __user *data)
+{
+	compat_int_t is_activated;
+	int err;
+
+	err = get_user(is_activated, &data->is_activated);
+	err |= put_user(is_activated, &data32->is_activated);
+	return err;
+}
+
+static unsigned int convert_cmd(unsigned int cmd)
+{
+	switch (cmd) {
+	case COMPAT_QSEECOM_IOCTL_REGISTER_LISTENER_REQ:
+		return QSEECOM_IOCTL_REGISTER_LISTENER_REQ;
+	case COMPAT_QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ:
+		return QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ;
+	case COMPAT_QSEECOM_IOCTL_LOAD_APP_REQ:
+		return QSEECOM_IOCTL_LOAD_APP_REQ;
+	case COMPAT_QSEECOM_IOCTL_RECEIVE_REQ:
+		return QSEECOM_IOCTL_RECEIVE_REQ;
+	case COMPAT_QSEECOM_IOCTL_SEND_RESP_REQ:
+		return QSEECOM_IOCTL_SEND_RESP_REQ;
+	case COMPAT_QSEECOM_IOCTL_UNLOAD_APP_REQ:
+		return QSEECOM_IOCTL_UNLOAD_APP_REQ;
+	case COMPAT_QSEECOM_IOCTL_PERF_ENABLE_REQ:
+		return QSEECOM_IOCTL_PERF_ENABLE_REQ;
+	case COMPAT_QSEECOM_IOCTL_PERF_DISABLE_REQ:
+		return QSEECOM_IOCTL_PERF_DISABLE_REQ;
+	case COMPAT_QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ:
+		return QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ;
+	case COMPAT_QSEECOM_IOCTL_SET_BUS_SCALING_REQ:
+		return QSEECOM_IOCTL_SET_BUS_SCALING_REQ;
+	case COMPAT_QSEECOM_IOCTL_SEND_CMD_REQ:
+		return QSEECOM_IOCTL_SEND_CMD_REQ;
+	case COMPAT_QSEECOM_IOCTL_SEND_MODFD_CMD_REQ:
+		return QSEECOM_IOCTL_SEND_MODFD_CMD_REQ;
+	case COMPAT_QSEECOM_IOCTL_SET_MEM_PARAM_REQ:
+		return QSEECOM_IOCTL_SET_MEM_PARAM_REQ;
+	case COMPAT_QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ:
+		return QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ;
+	case COMPAT_QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ:
+		return QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ;
+	case COMPAT_QSEECOM_IOCTL_APP_LOADED_QUERY_REQ:
+		return QSEECOM_IOCTL_APP_LOADED_QUERY_REQ;
+	case COMPAT_QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ:
+		return QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ;
+	case COMPAT_QSEECOM_IOCTL_CREATE_KEY_REQ:
+		return QSEECOM_IOCTL_CREATE_KEY_REQ;
+	case COMPAT_QSEECOM_IOCTL_WIPE_KEY_REQ:
+		return QSEECOM_IOCTL_WIPE_KEY_REQ;
+	case COMPAT_QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ:
+		return QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ;
+	case COMPAT_QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ:
+		return QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ;
+	case COMPAT_QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ:
+		return QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ;
+	case COMPAT_QSEECOM_IOCTL_SEND_MODFD_RESP:
+		return QSEECOM_IOCTL_SEND_MODFD_RESP;
+	case COMPAT_QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ:
+		return QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ;
+	case COMPAT_QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ:
+		return QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ;
+	case COMPAT_QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ:
+		return QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ;
+	case COMPAT_QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ:
+		return QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ;
+	case COMPAT_QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ:
+		return QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ;
+	case COMPAT_QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ:
+		return QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ;
+	case COMPAT_QSEECOM_IOCTL_SEND_MODFD_RESP_64:
+		return QSEECOM_IOCTL_SEND_MODFD_RESP_64;
+
+	default:
+		return cmd;
+	}
+}
+
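+/*
+ * 32-bit ioctl entry point. Ioctls that carry no argument structure are
+ * forwarded to qseecom_ioctl() directly; the rest allocate a native-layout
+ * copy on the user stack via compat_alloc_user_space(), translate the request
+ * in, issue the native ioctl, and, for ioctls that return data, translate the
+ * result back into the caller's 32-bit structure.
+ */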
+long compat_qseecom_ioctl(struct file *file,
+		unsigned int cmd, unsigned long arg)
+{
+	long ret;
+
+	switch (cmd) {
+
+	case COMPAT_QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ:
+	case COMPAT_QSEECOM_IOCTL_RECEIVE_REQ:
+	case COMPAT_QSEECOM_IOCTL_SEND_RESP_REQ:
+	case COMPAT_QSEECOM_IOCTL_UNLOAD_APP_REQ:
+	case COMPAT_QSEECOM_IOCTL_PERF_ENABLE_REQ:
+	case COMPAT_QSEECOM_IOCTL_PERF_DISABLE_REQ:
+	case COMPAT_QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: {
+		return qseecom_ioctl(file, convert_cmd(cmd), 0);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_REGISTER_LISTENER_REQ: {
+		struct compat_qseecom_register_listener_req __user *data32;
+		struct qseecom_register_listener_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_register_listener_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_LOAD_APP_REQ: {
+		struct compat_qseecom_load_img_req __user *data32;
+		struct qseecom_load_img_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_load_img_req(data32, data);
+		if (err)
+			return err;
+
+		ret = qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+		err = compat_put_qseecom_load_img_req(data32, data);
+		return ret ? ret : err;
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_SEND_CMD_REQ: {
+		struct compat_qseecom_send_cmd_req __user *data32;
+		struct qseecom_send_cmd_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_send_cmd_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_SEND_MODFD_CMD_REQ:
+	case COMPAT_QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ: {
+		struct compat_qseecom_send_modfd_cmd_req __user *data32;
+		struct qseecom_send_modfd_cmd_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_send_modfd_cmd_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_SET_MEM_PARAM_REQ: {
+		struct compat_qseecom_set_sb_mem_param_req __user *data32;
+		struct qseecom_set_sb_mem_param_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_set_sb_mem_param_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: {
+		struct compat_qseecom_qseos_version_req __user *data32;
+		struct qseecom_qseos_version_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_qseos_version_req(data32, data);
+		if (err)
+			return err;
+
+		ret = qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+		err = compat_put_qseecom_qseos_version_req(data32, data);
+
+		return ret ? ret : err;
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_SET_BUS_SCALING_REQ: {
+		compat_int_t __user *data32;
+		int __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+		err = compat_get_int(data32, data);
+		if (err)
+			return err;
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: {
+		struct compat_qseecom_load_img_req __user *data32;
+		struct qseecom_load_img_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_load_img_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: {
+		struct compat_qseecom_qseos_app_load_query __user *data32;
+		struct qseecom_qseos_app_load_query __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_qseos_app_load_query(data32, data);
+		if (err)
+			return err;
+
+		ret = qseecom_ioctl(file, convert_cmd(cmd),
+					(unsigned long)data);
+		err = compat_put_qseecom_qseos_app_load_query(data32, data);
+		return ret ? ret : err;
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: {
+		struct compat_qseecom_send_svc_cmd_req __user *data32;
+		struct qseecom_send_svc_cmd_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_send_svc_cmd_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_CREATE_KEY_REQ: {
+		struct compat_qseecom_create_key_req __user *data32;
+		struct qseecom_create_key_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_create_key_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_WIPE_KEY_REQ: {
+		struct compat_qseecom_wipe_key_req __user *data32;
+		struct qseecom_wipe_key_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_wipe_key_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: {
+		struct compat_qseecom_update_key_userinfo_req __user *data32;
+		struct qseecom_update_key_userinfo_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_update_key_userinfo_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: {
+		struct compat_qseecom_save_partition_hash_req __user *data32;
+		struct qseecom_save_partition_hash_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_save_partition_hash_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: {
+		struct compat_qseecom_is_es_activated_req __user *data32;
+		struct qseecom_is_es_activated_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_is_es_activated_req(data32, data);
+		if (err)
+			return err;
+
+		ret = qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+		err = compat_put_qseecom_is_es_activated_req(data32, data);
+		return ret ? ret : err;
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: {
+		struct compat_qseecom_mdtp_cipher_dip_req __user *data32;
+		struct qseecom_mdtp_cipher_dip_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_mdtp_cipher_dip_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_SEND_MODFD_RESP:
+	case COMPAT_QSEECOM_IOCTL_SEND_MODFD_RESP_64: {
+		struct compat_qseecom_send_modfd_listener_resp __user *data32;
+		struct qseecom_send_modfd_listener_resp __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_send_modfd_listener_resp(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: {
+		struct compat_qseecom_qteec_req __user *data32;
+		struct qseecom_qteec_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_qteec_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ:
+	case COMPAT_QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ:
+	case COMPAT_QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: {
+		struct compat_qseecom_qteec_modfd_req __user *data32;
+		struct qseecom_qteec_modfd_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_qteec_modfd_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	default:
+		return -ENOIOCTLCMD;
+	break;
+	}
+	return 0;
+}
diff --git a/drivers/misc/compat_qseecom.h b/drivers/misc/compat_qseecom.h
new file mode 100644
index 0000000..1ad9181
--- /dev/null
+++ b/drivers/misc/compat_qseecom.h
@@ -0,0 +1,337 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017, 2019, The Linux Foundation. All rights reserved.
+ */
+#ifndef _UAPI_COMPAT_QSEECOM_H_
+#define _UAPI_COMPAT_QSEECOM_H_
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#if IS_ENABLED(CONFIG_COMPAT)
+#include <linux/compat.h>
+
+/*
+ * struct compat_qseecom_register_listener_req -
+ *      for register listener ioctl request
+ * @listener_id - service id (shared between userspace and QSE)
+ * @ifd_data_fd - ion handle
+ * @virt_sb_base - shared buffer base in user space
+ * @sb_size - shared buffer size
+ */
+struct compat_qseecom_register_listener_req {
+	compat_ulong_t listener_id; /* in */
+	compat_long_t ifd_data_fd; /* in */
+	compat_uptr_t virt_sb_base; /* in */
+	compat_ulong_t sb_size; /* in */
+};
+
+/*
+ * struct compat_qseecom_send_cmd_req - for send command ioctl request
+ * @cmd_req_len - command buffer length
+ * @cmd_req_buf - command buffer
+ * @resp_len - response buffer length
+ * @resp_buf - response buffer
+ */
+struct compat_qseecom_send_cmd_req {
+	compat_uptr_t cmd_req_buf; /* in */
+	compat_uint_t cmd_req_len; /* in */
+	compat_uptr_t resp_buf; /* in/out */
+	compat_uint_t resp_len; /* in/out */
+};
+
+/*
+ * struct compat_qseecom_ion_fd_info - ion fd handle data information
+ * @fd - ion handle to some memory allocated in user space
+ * @cmd_buf_offset - command buffer offset
+ */
+struct compat_qseecom_ion_fd_info {
+	compat_long_t fd;
+	compat_ulong_t cmd_buf_offset;
+};
+/*
+ * struct compat_qseecom_send_modfd_cmd_req - for send command ioctl request
+ * @cmd_req_len - command buffer length
+ * @cmd_req_buf - command buffer
+ * @resp_len - response buffer length
+ * @resp_buf - response buffer
+ * @ifd_data - ion fd info (fd and command buffer offset) for memory
+ *	allocated in user space
+ */
+struct compat_qseecom_send_modfd_cmd_req {
+	compat_uptr_t cmd_req_buf; /* in */
+	compat_uint_t cmd_req_len; /* in */
+	compat_uptr_t resp_buf; /* in/out */
+	compat_uint_t resp_len; /* in/out */
+	struct compat_qseecom_ion_fd_info ifd_data[MAX_ION_FD];
+};
+
+/*
+ * struct compat_qseecom_send_resp_req
+ * signal to continue the send_cmd req.
+ * Used as a trigger from an HLOS listener service to notify QSEECOM that it
+ * is done with its operation and to provide the response, so that QSEECOM
+ * can continue the incomplete command execution.
+ * @resp_len - Length of the response
+ * @resp_buf - Response buffer where the response of the cmd should go.
+ */
+struct compat_qseecom_send_resp_req {
+	compat_uptr_t resp_buf; /* in */
+	compat_uint_t resp_len; /* in */
+};
+
+/*
+ * struct compat_qseecom_load_img_req
+ * for sending image length information and the ion file descriptor to the
+ * qseecom driver. The ion file descriptor is used for retrieving the ion
+ * file handle and, in turn, the physical address of the image location.
+ * @mdt_len - Length of the .mdt file in bytes.
+ * @img_len - Length of the .mdt + .b00 + ... + .bxx image files in bytes.
+ * @ifd_data_fd - Ion file descriptor used when allocating memory.
+ * @img_name - Name of the image.
+ */
+struct compat_qseecom_load_img_req {
+	compat_ulong_t mdt_len; /* in */
+	compat_ulong_t img_len; /* in */
+	compat_long_t  ifd_data_fd; /* in */
+	char	 img_name[MAX_APP_NAME_SIZE]; /* in */
+	compat_ulong_t app_arch; /* in */
+	compat_uint_t app_id; /* out*/
+};
+
+struct compat_qseecom_set_sb_mem_param_req {
+	compat_long_t ifd_data_fd; /* in */
+	compat_uptr_t virt_sb_base; /* in */
+	compat_ulong_t sb_len; /* in */
+};
+
+/*
+ * struct compat_qseecom_qseos_version_req - get qseos version
+ * @qseos_version - version number
+ */
+struct compat_qseecom_qseos_version_req {
+	compat_uint_t qseos_version; /* in */
+};
+
+/*
+ * struct compat_qseecom_qseos_app_load_query - verify if app is loaded in qsee
+ * @app_name[MAX_APP_NAME_SIZE] - name of the app.
+ * @app_id - app id.
+ */
+struct compat_qseecom_qseos_app_load_query {
+	char app_name[MAX_APP_NAME_SIZE]; /* in */
+	compat_uint_t app_id; /* out */
+	compat_ulong_t app_arch;
+};
+
+struct compat_qseecom_send_svc_cmd_req {
+	compat_ulong_t cmd_id;
+	compat_uptr_t cmd_req_buf; /* in */
+	compat_uint_t cmd_req_len; /* in */
+	compat_uptr_t resp_buf; /* in/out */
+	compat_uint_t resp_len; /* in/out */
+};
+
+struct compat_qseecom_create_key_req {
+	unsigned char hash32[QSEECOM_HASH_SIZE];
+	enum qseecom_key_management_usage_type usage;
+};
+
+struct compat_qseecom_wipe_key_req {
+	enum qseecom_key_management_usage_type usage;
+	compat_int_t wipe_key_flag;
+};
+
+struct compat_qseecom_update_key_userinfo_req {
+	unsigned char current_hash32[QSEECOM_HASH_SIZE];
+	unsigned char new_hash32[QSEECOM_HASH_SIZE];
+	enum qseecom_key_management_usage_type usage;
+};
+
+/*
+ * struct compat_qseecom_save_partition_hash_req
+ * @partition_id - partition id.
+ * @digest[SHA256_DIGEST_LENGTH] - sha256 digest.
+ */
+struct compat_qseecom_save_partition_hash_req {
+	compat_int_t partition_id; /* in */
+	char digest[SHA256_DIGEST_LENGTH]; /* in */
+};
+
+/*
+ * struct compat_qseecom_is_es_activated_req
+ * @is_activated - 1=true, 0=false
+ */
+struct compat_qseecom_is_es_activated_req {
+	compat_int_t is_activated; /* out */
+};
+
+/*
+ * struct compat_qseecom_mdtp_cipher_dip_req
+ * @in_buf - input buffer
+ * @in_buf_size - input buffer size
+ * @out_buf - output buffer
+ * @out_buf_size - output buffer size
+ * @direction - 0=encrypt, 1=decrypt
+ */
+struct compat_qseecom_mdtp_cipher_dip_req {
+	compat_uptr_t in_buf;
+	compat_uint_t in_buf_size;
+	compat_uptr_t out_buf;
+	compat_uint_t out_buf_size;
+	compat_uint_t direction;
+};
+
+/*
+ * struct compat_qseecom_send_modfd_listener_resp - for send listener
+ *	response ioctl request
+ * @resp_buf_ptr - response buffer
+ * @resp_len - response buffer length
+ * @ifd_data - ion fd info (fd and command buffer offset) for memory
+ *	allocated in user space
+ */
+struct compat_qseecom_send_modfd_listener_resp {
+	compat_uptr_t resp_buf_ptr; /* in */
+	compat_uint_t resp_len; /* in */
+	struct compat_qseecom_ion_fd_info ifd_data[MAX_ION_FD]; /* in */
+};
+
+struct compat_qseecom_qteec_req {
+	compat_uptr_t req_ptr;
+	compat_ulong_t req_len;
+	compat_uptr_t resp_ptr;
+	compat_ulong_t resp_len;
+};
+
+struct compat_qseecom_qteec_modfd_req {
+	compat_uptr_t req_ptr;
+	compat_ulong_t req_len;
+	compat_uptr_t resp_ptr;
+	compat_ulong_t resp_len;
+	struct compat_qseecom_ion_fd_info ifd_data[MAX_ION_FD];
+};
+
+struct compat_qseecom_ce_pipe_entry {
+	compat_int_t valid;
+	compat_uint_t ce_num;
+	compat_uint_t ce_pipe_pair;
+};
+
+struct compat_qseecom_ce_info_req {
+	unsigned char handle[MAX_CE_INFO_HANDLE_SIZE];
+	compat_uint_t usage;
+	compat_uint_t unit_num;
+	compat_uint_t num_ce_pipe_entries;
+	struct compat_qseecom_ce_pipe_entry
+				ce_pipe_entry[MAX_CE_PIPE_PAIR_PER_UNIT];
+};
+
+struct file;
+extern long compat_qseecom_ioctl(struct file *file,
+					unsigned int cmd, unsigned long arg);
+
+#define COMPAT_QSEECOM_IOCTL_REGISTER_LISTENER_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 1, struct compat_qseecom_register_listener_req)
+
+#define COMPAT_QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ \
+	_IO(QSEECOM_IOC_MAGIC, 2)
+
+#define COMPAT_QSEECOM_IOCTL_SEND_CMD_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 3, struct compat_qseecom_send_cmd_req)
+
+#define COMPAT_QSEECOM_IOCTL_SEND_MODFD_CMD_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 4, struct compat_qseecom_send_modfd_cmd_req)
+
+#define COMPAT_QSEECOM_IOCTL_RECEIVE_REQ \
+	_IO(QSEECOM_IOC_MAGIC, 5)
+
+#define COMPAT_QSEECOM_IOCTL_SEND_RESP_REQ \
+	_IO(QSEECOM_IOC_MAGIC, 6)
+
+#define COMPAT_QSEECOM_IOCTL_LOAD_APP_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 7, struct compat_qseecom_load_img_req)
+
+#define COMPAT_QSEECOM_IOCTL_SET_MEM_PARAM_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 8, struct compat_qseecom_set_sb_mem_param_req)
+
+#define COMPAT_QSEECOM_IOCTL_UNLOAD_APP_REQ \
+	_IO(QSEECOM_IOC_MAGIC, 9)
+
+#define COMPAT_QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 10, struct compat_qseecom_qseos_version_req)
+
+#define COMPAT_QSEECOM_IOCTL_PERF_ENABLE_REQ \
+	_IO(QSEECOM_IOC_MAGIC, 11)
+
+#define COMPAT_QSEECOM_IOCTL_PERF_DISABLE_REQ \
+	_IO(QSEECOM_IOC_MAGIC, 12)
+
+#define COMPAT_QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 13, struct compat_qseecom_load_img_req)
+
+#define COMPAT_QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ \
+	_IO(QSEECOM_IOC_MAGIC, 14)
+
+#define COMPAT_QSEECOM_IOCTL_APP_LOADED_QUERY_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 15, struct compat_qseecom_qseos_app_load_query)
+
+#define COMPAT_QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 16, struct compat_qseecom_send_svc_cmd_req)
+
+#define COMPAT_QSEECOM_IOCTL_CREATE_KEY_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 17, struct compat_qseecom_create_key_req)
+
+#define COMPAT_QSEECOM_IOCTL_WIPE_KEY_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 18, struct compat_qseecom_wipe_key_req)
+
+#define COMPAT_QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 19, \
+				struct compat_qseecom_save_partition_hash_req)
+
+#define COMPAT_QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 20, struct compat_qseecom_is_es_activated_req)
+
+#define COMPAT_QSEECOM_IOCTL_SEND_MODFD_RESP \
+	_IOWR(QSEECOM_IOC_MAGIC, 21, \
+				struct compat_qseecom_send_modfd_listener_resp)
+
+#define COMPAT_QSEECOM_IOCTL_SET_BUS_SCALING_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 23, int)
+
+#define COMPAT_QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 24, \
+			struct compat_qseecom_update_key_userinfo_req)
+
+#define COMPAT_QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 30, struct compat_qseecom_qteec_modfd_req)
+
+#define COMPAT_QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 31, struct compat_qseecom_qteec_req)
+
+#define COMPAT_QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 32, struct compat_qseecom_qteec_modfd_req)
+
+#define COMPAT_QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 33, struct compat_qseecom_qteec_modfd_req)
+
+#define COMPAT_QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 34, struct compat_qseecom_mdtp_cipher_dip_req)
+
+#define COMPAT_QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 35, struct compat_qseecom_send_modfd_cmd_req)
+
+#define COMPAT_QSEECOM_IOCTL_SEND_MODFD_RESP_64 \
+	_IOWR(QSEECOM_IOC_MAGIC, 36, \
+				struct compat_qseecom_send_modfd_listener_resp)
+#define COMPAT_QSEECOM_IOCTL_GET_CE_PIPE_INFO \
+	_IOWR(QSEECOM_IOC_MAGIC, 40, \
+				struct compat_qseecom_ce_info_req)
+#define COMPAT_QSEECOM_IOCTL_FREE_CE_PIPE_INFO \
+	_IOWR(QSEECOM_IOC_MAGIC, 41, \
+				struct compat_qseecom_ce_info_req)
+#define COMPAT_QSEECOM_IOCTL_QUERY_CE_PIPE_INFO \
+	_IOWR(QSEECOM_IOC_MAGIC, 42, \
+				struct compat_qseecom_ce_info_req)
+
+#endif /* CONFIG_COMPAT */
+#endif /* _UAPI_COMPAT_QSEECOM_H_ */
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
new file mode 100644
index 0000000..dabb38d
--- /dev/null
+++ b/drivers/misc/qseecom.c
@@ -0,0 +1,9167 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * QTI Secure Execution Environment Communicator (QSEECOM) driver
+ *
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "QSEECOM: %s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/io.h>
+#include <linux/msm_ion.h>
+#include <linux/types.h>
+#include <linux/clk.h>
+#include <linux/qseecom.h>
+#include <linux/elf.h>
+#include <linux/firmware.h>
+#include <linux/freezer.h>
+#include <linux/scatterlist.h>
+#include <linux/regulator/consumer.h>
+#include <linux/dma-mapping.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/scm.h>
+#include <soc/qcom/socinfo.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <soc/qcom/qseecomi.h>
+#include <asm/cacheflush.h>
+#include "qseecom_kernel.h"
+#include <crypto/ice.h>
+#include <linux/delay.h>
+#include <linux/signal.h>
+#include <linux/dma-buf.h>
+#include <linux/ion_kernel.h>
+#include <linux/compat.h>
+#include "compat_qseecom.h"
+#include <linux/pfk.h>
+
+#define QSEECOM_DEV			"qseecom"
+#define QSEOS_VERSION_14		0x14
+#define QSEEE_VERSION_00		0x400000
+#define QSEE_VERSION_01			0x401000
+#define QSEE_VERSION_02			0x402000
+#define QSEE_VERSION_03			0x403000
+#define QSEE_VERSION_04			0x404000
+#define QSEE_VERSION_05			0x405000
+#define QSEE_VERSION_20			0x800000
+#define QSEE_VERSION_40			0x1000000  /* TZ.BF.4.0 */
+
+#define QSEE_CE_CLK_100MHZ		100000000
+#define CE_CLK_DIV			1000000
+
+#define QSEECOM_MAX_SG_ENTRY			4096
+#define QSEECOM_SG_ENTRY_MSG_BUF_SZ_64BIT	\
+			(QSEECOM_MAX_SG_ENTRY * SG_ENTRY_SZ_64BIT)
+
+#define QSEECOM_INVALID_KEY_ID  0xff
+
+/* Save partition image hash for authentication check */
+#define SCM_SAVE_PARTITION_HASH_ID	0x01
+
+/* Check if enterprise security is activated */
+#define SCM_IS_ACTIVATED_ID		0x02
+
+/* Encrypt/Decrypt Data Integrity Partition (DIP) for MDTP */
+#define SCM_MDTP_CIPHER_DIP		0x01
+
+/* Maximum Allowed Size (128K) of Data Integrity Partition (DIP) for MDTP */
+#define MAX_DIP			0x20000
+
+#define RPMB_SERVICE			0x2000
+#define SSD_SERVICE			0x3000
+
+#define QSEECOM_SEND_CMD_CRYPTO_TIMEOUT	2000
+#define QSEECOM_LOAD_APP_CRYPTO_TIMEOUT	2000
+#define TWO 2
+#define QSEECOM_UFS_ICE_CE_NUM 10
+#define QSEECOM_SDCC_ICE_CE_NUM 20
+#define QSEECOM_ICE_FDE_KEY_INDEX 0
+
+#define PHY_ADDR_4G	(1ULL<<32)
+
+#define QSEECOM_STATE_NOT_READY         0
+#define QSEECOM_STATE_SUSPEND           1
+#define QSEECOM_STATE_READY             2
+#define QSEECOM_ICE_FDE_KEY_SIZE_MASK   2
+
+/*
+ * Default the ce info unit to 0 for services which support only a single
+ * instance. Most services are in this category.
+ */
+#define DEFAULT_CE_INFO_UNIT 0
+#define DEFAULT_NUM_CE_INFO_UNIT 1
+
+#define FDE_FLAG_POS    4
+#define ENABLE_KEY_WRAP_IN_KS    (1 << FDE_FLAG_POS)
+
+enum qseecom_clk_definitions {
+	CLK_DFAB = 0,
+	CLK_SFPB,
+};
+
+enum qseecom_ice_key_size_type {
+	QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE =
+		(0 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
+	QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE =
+		(1 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
+	QSEE_ICE_FDE_KEY_SIZE_UNDEFINED =
+		(0xF << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
+};
+
+enum qseecom_client_handle_type {
+	QSEECOM_CLIENT_APP = 1,
+	QSEECOM_LISTENER_SERVICE,
+	QSEECOM_SECURE_SERVICE,
+	QSEECOM_GENERIC,
+	QSEECOM_UNAVAILABLE_CLIENT_APP,
+};
+
+enum qseecom_ce_hw_instance {
+	CLK_QSEE = 0,
+	CLK_CE_DRV,
+	CLK_INVALID,
+};
+
+enum qseecom_cache_ops {
+	QSEECOM_CACHE_CLEAN,
+	QSEECOM_CACHE_INVALIDATE,
+};
+
+static struct class *driver_class;
+static dev_t qseecom_device_no;
+
+static DEFINE_MUTEX(qsee_bw_mutex);
+static DEFINE_MUTEX(app_access_lock);
+static DEFINE_MUTEX(clk_access_lock);
+static DEFINE_MUTEX(listener_access_lock);
+
+
+struct sglist_info {
+	uint32_t indexAndFlags;
+	uint32_t sizeOrCount;
+};
+
+/*
+ * The 31st bit indicates whether the request buffer holds a single physical
+ * address or multiple. If it is set, the index locates a single physical addr
+ * inside the request buffer, and `sizeOrCount` is the size of the memory being
+ * shared at that physical address.
+ * Otherwise, the index locates an array of {start, len} pairs (a
+ * "scatter/gather list"), and `sizeOrCount` gives the number of entries in
+ * that array.
+ *
+ * The 30th bit indicates 64 or 32bit address; when it is set, physical addr
+ * and scatter gather entry sizes are 64-bit values.  Otherwise, 32-bit values.
+ *
+ * The bits [0:29] of `indexAndFlags` hold an offset into the request buffer.
+ */
+#define SGLISTINFO_SET_INDEX_FLAG(c, s, i)	\
+	((uint32_t)(((c & 1) << 31) | ((s & 1) << 30) | (i & 0x3fffffff)))
+
+#define SGLISTINFO_TABLE_SIZE	(sizeof(struct sglist_info) * MAX_ION_FD)
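+
+/*
+ * Example encodings (illustrative offsets): SGLISTINFO_SET_INDEX_FLAG(1, 1,
+ * 0x10) yields 0xC0000010, i.e. a single 64-bit physical address described at
+ * offset 0x10 of the request buffer; SGLISTINFO_SET_INDEX_FLAG(0, 0, 0x20)
+ * yields 0x00000020, i.e. a 32-bit scatter/gather array at offset 0x20, with
+ * sizeOrCount holding its entry count.
+ */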
+
+#define FEATURE_ID_WHITELIST	15	/* whitelist feature id */
+
+#define MAKE_WHITELIST_VERSION(major, minor, patch) \
+	(((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
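+/* e.g. MAKE_WHITELIST_VERSION(1, 0, 0) == 0x00400000 */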
+
+struct qseecom_registered_listener_list {
+	struct list_head                 list;
+	struct qseecom_register_listener_req svc;
+	void  *user_virt_sb_base;
+	struct dma_buf             *dmabuf;
+	struct dma_buf_attachment  *attach;
+	struct sg_table            *sgt;
+	u8                         *sb_virt;
+	phys_addr_t                sb_phys;
+	size_t                     sb_length;
+	wait_queue_head_t          rcv_req_wq;
+	/* rcv_req_flag: 0: ready and empty; 1: received req */
+	int                        rcv_req_flag;
+	int                        send_resp_flag;
+	bool                       listener_in_use;
+	/* wq for thread blocked on this listener */
+	wait_queue_head_t          listener_block_app_wq;
+	struct sglist_info         sglistinfo_ptr[MAX_ION_FD];
+	uint32_t                   sglist_cnt;
+	int                        abort;
+	bool                       unregister_pending;
+};
+
+struct qseecom_unregister_pending_list {
+	struct list_head		list;
+	struct qseecom_dev_handle	*data;
+};
+
+struct qseecom_registered_app_list {
+	struct list_head                 list;
+	u32  app_id;
+	u32  ref_cnt;
+	char app_name[MAX_APP_NAME_SIZE];
+	u32  app_arch;
+	bool app_blocked;
+	u32  check_block;
+	u32  blocked_on_listener_id;
+};
+
+struct qseecom_registered_kclient_list {
+	struct list_head list;
+	struct qseecom_handle *handle;
+};
+
+struct qseecom_ce_info_use {
+	unsigned char handle[MAX_CE_INFO_HANDLE_SIZE];
+	unsigned int unit_num;
+	unsigned int num_ce_pipe_entries;
+	struct qseecom_ce_pipe_entry *ce_pipe_entry;
+	bool alloc;
+	uint32_t type;
+};
+
+struct ce_hw_usage_info {
+	uint32_t qsee_ce_hw_instance;
+	uint32_t num_fde;
+	struct qseecom_ce_info_use *fde;
+	uint32_t num_pfe;
+	struct qseecom_ce_info_use *pfe;
+};
+
+struct qseecom_clk {
+	enum qseecom_ce_hw_instance instance;
+	struct clk *ce_core_clk;
+	struct clk *ce_clk;
+	struct clk *ce_core_src_clk;
+	struct clk *ce_bus_clk;
+	uint32_t clk_access_cnt;
+};
+
+struct qseecom_control {
+	struct list_head  registered_listener_list_head;
+
+	struct list_head  registered_app_list_head;
+	spinlock_t        registered_app_list_lock;
+
+	struct list_head   registered_kclient_list_head;
+	spinlock_t        registered_kclient_list_lock;
+
+	wait_queue_head_t send_resp_wq;
+	int               send_resp_flag;
+
+	uint32_t          qseos_version;
+	uint32_t          qsee_version;
+	struct device *pdev;        /* class_dev */
+	struct device *dev;         /* platform_dev->dev */
+	bool  whitelist_support;
+	bool  commonlib_loaded;
+	bool  commonlib64_loaded;
+	struct ce_hw_usage_info ce_info;
+
+	int qsee_bw_count;
+	int qsee_sfpb_bw_count;
+
+	uint32_t qsee_perf_client;
+	struct qseecom_clk qsee;
+	struct qseecom_clk ce_drv;
+
+	bool support_bus_scaling;
+	bool support_fde;
+	bool support_pfe;
+	bool fde_key_size;
+	uint32_t  cumulative_mode;
+	enum qseecom_bandwidth_request_mode  current_mode;
+	struct timer_list bw_scale_down_timer;
+	struct work_struct bw_inactive_req_ws;
+	struct cdev cdev;
+	bool timer_running;
+	bool no_clock_support;
+	unsigned int ce_opp_freq_hz;
+	bool appsbl_qseecom_support;
+	uint32_t qsee_reentrancy_support;
+	bool enable_key_wrap_in_ks;
+
+	uint32_t app_block_ref_cnt;
+	wait_queue_head_t app_block_wq;
+	atomic_t qseecom_state;
+	int is_apps_region_protected;
+	bool smcinvoke_support;
+
+	struct list_head  unregister_lsnr_pending_list_head;
+	wait_queue_head_t register_lsnr_pending_wq;
+};
+
+struct qseecom_sec_buf_fd_info {
+	bool is_sec_buf_fd;
+	size_t size;
+	void *vbase;
+	dma_addr_t pbase;
+};
+
+struct qseecom_param_memref {
+	uint32_t buffer;
+	uint32_t size;
+};
+
+struct qseecom_client_handle {
+	u32  app_id;
+	struct dma_buf *dmabuf;
+	struct dma_buf_attachment  *attach;
+	struct sg_table *sgt;
+	u8 *sb_virt;
+	phys_addr_t sb_phys;
+	size_t sb_length;
+	unsigned long user_virt_sb_base;
+	char app_name[MAX_APP_NAME_SIZE];
+	u32  app_arch;
+	struct qseecom_sec_buf_fd_info sec_buf_fd[MAX_ION_FD];
+};
+
+struct qseecom_listener_handle {
+	u32               id;
+	bool              unregister_pending;
+};
+
+static struct qseecom_control qseecom;
+
+struct qseecom_dev_handle {
+	enum qseecom_client_handle_type type;
+	union {
+		struct qseecom_client_handle client;
+		struct qseecom_listener_handle listener;
+	};
+	bool released;
+	int               abort;
+	wait_queue_head_t abort_wq;
+	atomic_t          ioctl_count;
+	bool  perf_enabled;
+	bool  fast_load_enabled;
+	enum qseecom_bandwidth_request_mode mode;
+	struct sglist_info sglistinfo_ptr[MAX_ION_FD];
+	uint32_t sglist_cnt;
+	bool use_legacy_cmd;
+};
+
+struct qseecom_key_id_usage_desc {
+	uint8_t desc[QSEECOM_KEY_ID_SIZE];
+};
+
+struct qseecom_crypto_info {
+	unsigned int unit_num;
+	unsigned int ce;
+	unsigned int pipe_pair;
+};
+
+static struct qseecom_key_id_usage_desc key_id_array[] = {
+	{
+		.desc = "Undefined Usage Index",
+	},
+
+	{
+		.desc = "Full Disk Encryption",
+	},
+
+	{
+		.desc = "Per File Encryption",
+	},
+
+	{
+		.desc = "UFS ICE Full Disk Encryption",
+	},
+
+	{
+		.desc = "SDCC ICE Full Disk Encryption",
+	},
+};
+
+/* Function proto types */
+static int qsee_vote_for_clock(struct qseecom_dev_handle *, int32_t);
+static void qsee_disable_clock_vote(struct qseecom_dev_handle *, int32_t);
+static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce);
+static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce);
+static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce);
+static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
+					char *cmnlib_name);
+static int qseecom_enable_ice_setup(int usage);
+static int qseecom_disable_ice_setup(int usage);
+static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id);
+static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
+						void __user *argp);
+static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
+						void __user *argp);
+static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
+						void __user *argp);
+
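+/*
+ * Parse the "androidboot.keymaster=" kernel command line option; the parsed
+ * value is stored in qseecom.is_apps_region_protected.
+ */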
+static int get_qseecom_keymaster_status(char *str)
+{
+	get_option(&str, &qseecom.is_apps_region_protected);
+	return 1;
+}
+__setup("androidboot.keymaster=", get_qseecom_keymaster_status);
+
+static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
+			const void *req_buf, void *resp_buf)
+{
+	int      ret = 0;
+	uint32_t smc_id = 0;
+	uint32_t qseos_cmd_id = 0;
+	struct scm_desc desc = {0};
+	struct qseecom_command_scm_resp *scm_resp = NULL;
+
+	if (!req_buf || !resp_buf) {
+		pr_err("Invalid buffer pointer\n");
+		return -EINVAL;
+	}
+	qseos_cmd_id = *(uint32_t *)req_buf;
+	scm_resp = (struct qseecom_command_scm_resp *)resp_buf;
+
+	switch (svc_id) {
+	case SCM_SVC_INFO: {
+		if (tz_cmd_id == 3) {
+			smc_id = TZ_INFO_GET_FEATURE_VERSION_ID;
+			desc.arginfo = TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID;
+			desc.args[0] = *(uint32_t *)req_buf;
+		} else {
+			pr_err("Unsupported svc_id %d, tz_cmd_id %d\n",
+				svc_id, tz_cmd_id);
+			return -EINVAL;
+		}
+		ret = scm_call2(smc_id, &desc);
+		break;
+	}
+	case SCM_SVC_ES: {
+		switch (tz_cmd_id) {
+		case SCM_SAVE_PARTITION_HASH_ID: {
+			u32 tzbuflen = PAGE_ALIGN(SHA256_DIGEST_LENGTH);
+			struct qseecom_save_partition_hash_req *p_hash_req =
+				(struct qseecom_save_partition_hash_req *)
+				req_buf;
+			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
+
+			if (!tzbuf)
+				return -ENOMEM;
+			memset(tzbuf, 0, tzbuflen);
+			memcpy(tzbuf, p_hash_req->digest,
+				SHA256_DIGEST_LENGTH);
+			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
+			smc_id = TZ_ES_SAVE_PARTITION_HASH_ID;
+			desc.arginfo = TZ_ES_SAVE_PARTITION_HASH_ID_PARAM_ID;
+			desc.args[0] = p_hash_req->partition_id;
+			desc.args[1] = virt_to_phys(tzbuf);
+			desc.args[2] = SHA256_DIGEST_LENGTH;
+			ret = scm_call2(smc_id, &desc);
+			kzfree(tzbuf);
+			break;
+		}
+		default: {
+			pr_err("tz_cmd_id %d is not supported by scm_call2\n",
+						tz_cmd_id);
+			ret = -EINVAL;
+			break;
+		}
+		} /* end of switch (tz_cmd_id) */
+		break;
+	} /* end of case SCM_SVC_ES */
+	case SCM_SVC_TZSCHEDULER: {
+		switch (qseos_cmd_id) {
+		case QSEOS_APP_START_COMMAND: {
+			struct qseecom_load_app_ireq *req;
+			struct qseecom_load_app_64bit_ireq *req_64bit;
+
+			smc_id = TZ_OS_APP_START_ID;
+			desc.arginfo = TZ_OS_APP_START_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_load_app_ireq *)req_buf;
+				desc.args[0] = req->mdt_len;
+				desc.args[1] = req->img_len;
+				desc.args[2] = req->phy_addr;
+			} else {
+				req_64bit =
+					(struct qseecom_load_app_64bit_ireq *)
+					req_buf;
+				desc.args[0] = req_64bit->mdt_len;
+				desc.args[1] = req_64bit->img_len;
+				desc.args[2] = req_64bit->phy_addr;
+			}
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_APP_SHUTDOWN_COMMAND: {
+			struct qseecom_unload_app_ireq *req;
+
+			req = (struct qseecom_unload_app_ireq *)req_buf;
+			smc_id = TZ_OS_APP_SHUTDOWN_ID;
+			desc.arginfo = TZ_OS_APP_SHUTDOWN_ID_PARAM_ID;
+			desc.args[0] = req->app_id;
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_APP_LOOKUP_COMMAND: {
+			struct qseecom_check_app_ireq *req;
+			u32 tzbuflen = PAGE_ALIGN(sizeof(req->app_name));
+			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
+
+			if (!tzbuf)
+				return -ENOMEM;
+			req = (struct qseecom_check_app_ireq *)req_buf;
+			pr_debug("Lookup app_name = %s\n", req->app_name);
+			strlcpy(tzbuf, req->app_name, sizeof(req->app_name));
+			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
+			smc_id = TZ_OS_APP_LOOKUP_ID;
+			desc.arginfo = TZ_OS_APP_LOOKUP_ID_PARAM_ID;
+			desc.args[0] = virt_to_phys(tzbuf);
+			desc.args[1] = strlen(req->app_name);
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			kzfree(tzbuf);
+			break;
+		}
+		case QSEOS_APP_REGION_NOTIFICATION: {
+			struct qsee_apps_region_info_ireq *req;
+			struct qsee_apps_region_info_64bit_ireq *req_64bit;
+
+			smc_id = TZ_OS_APP_REGION_NOTIFICATION_ID;
+			desc.arginfo =
+				TZ_OS_APP_REGION_NOTIFICATION_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qsee_apps_region_info_ireq *)
+					req_buf;
+				desc.args[0] = req->addr;
+				desc.args[1] = req->size;
+			} else {
+				req_64bit =
+				(struct qsee_apps_region_info_64bit_ireq *)
+					req_buf;
+				desc.args[0] = req_64bit->addr;
+				desc.args[1] = req_64bit->size;
+			}
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_LOAD_SERV_IMAGE_COMMAND: {
+			struct qseecom_load_lib_image_ireq *req;
+			struct qseecom_load_lib_image_64bit_ireq *req_64bit;
+
+			smc_id = TZ_OS_LOAD_SERVICES_IMAGE_ID;
+			desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_load_lib_image_ireq *)
+					req_buf;
+				desc.args[0] = req->mdt_len;
+				desc.args[1] = req->img_len;
+				desc.args[2] = req->phy_addr;
+			} else {
+				req_64bit =
+				(struct qseecom_load_lib_image_64bit_ireq *)
+					req_buf;
+				desc.args[0] = req_64bit->mdt_len;
+				desc.args[1] = req_64bit->img_len;
+				desc.args[2] = req_64bit->phy_addr;
+			}
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_UNLOAD_SERV_IMAGE_COMMAND: {
+			smc_id = TZ_OS_UNLOAD_SERVICES_IMAGE_ID;
+			desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_REGISTER_LISTENER: {
+			struct qseecom_register_listener_ireq *req;
+			struct qseecom_register_listener_64bit_ireq *req_64bit;
+
+			desc.arginfo =
+				TZ_OS_REGISTER_LISTENER_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_register_listener_ireq *)
+					req_buf;
+				desc.args[0] = req->listener_id;
+				desc.args[1] = req->sb_ptr;
+				desc.args[2] = req->sb_len;
+			} else {
+				req_64bit =
+				(struct qseecom_register_listener_64bit_ireq *)
+					req_buf;
+				desc.args[0] = req_64bit->listener_id;
+				desc.args[1] = req_64bit->sb_ptr;
+				desc.args[2] = req_64bit->sb_len;
+			}
+			qseecom.smcinvoke_support = true;
+			smc_id = TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID;
+			ret = scm_call2(smc_id, &desc);
+			if (ret && ret != -EBUSY) {
+				qseecom.smcinvoke_support = false;
+				smc_id = TZ_OS_REGISTER_LISTENER_ID;
+				ret = scm_call2(smc_id, &desc);
+			}
+			break;
+		}
+		case QSEOS_DEREGISTER_LISTENER: {
+			struct qseecom_unregister_listener_ireq *req;
+
+			req = (struct qseecom_unregister_listener_ireq *)
+				req_buf;
+			smc_id = TZ_OS_DEREGISTER_LISTENER_ID;
+			desc.arginfo = TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID;
+			desc.args[0] = req->listener_id;
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_LISTENER_DATA_RSP_COMMAND: {
+			struct qseecom_client_listener_data_irsp *req;
+
+			req = (struct qseecom_client_listener_data_irsp *)
+				req_buf;
+			smc_id = TZ_OS_LISTENER_RESPONSE_HANDLER_ID;
+			desc.arginfo =
+				TZ_OS_LISTENER_RESPONSE_HANDLER_ID_PARAM_ID;
+			desc.args[0] = req->listener_id;
+			desc.args[1] = req->status;
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST: {
+			struct qseecom_client_listener_data_irsp *req;
+			struct qseecom_client_listener_data_64bit_irsp *req_64;
+
+			smc_id =
+			TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID;
+			desc.arginfo =
+			TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req =
+				(struct qseecom_client_listener_data_irsp *)
+				req_buf;
+				desc.args[0] = req->listener_id;
+				desc.args[1] = req->status;
+				desc.args[2] = req->sglistinfo_ptr;
+				desc.args[3] = req->sglistinfo_len;
+			} else {
+				req_64 =
+			(struct qseecom_client_listener_data_64bit_irsp *)
+				req_buf;
+				desc.args[0] = req_64->listener_id;
+				desc.args[1] = req_64->status;
+				desc.args[2] = req_64->sglistinfo_ptr;
+				desc.args[3] = req_64->sglistinfo_len;
+			}
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_LOAD_EXTERNAL_ELF_COMMAND: {
+			struct qseecom_load_app_ireq *req;
+			struct qseecom_load_app_64bit_ireq *req_64bit;
+
+			smc_id = TZ_OS_LOAD_EXTERNAL_IMAGE_ID;
+			desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_load_app_ireq *)req_buf;
+				desc.args[0] = req->mdt_len;
+				desc.args[1] = req->img_len;
+				desc.args[2] = req->phy_addr;
+			} else {
+				req_64bit =
+				(struct qseecom_load_app_64bit_ireq *)req_buf;
+				desc.args[0] = req_64bit->mdt_len;
+				desc.args[1] = req_64bit->img_len;
+				desc.args[2] = req_64bit->phy_addr;
+			}
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND: {
+			smc_id = TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID;
+			desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			break;
+			}
+
+		case QSEOS_CLIENT_SEND_DATA_COMMAND: {
+			struct qseecom_client_send_data_ireq *req;
+			struct qseecom_client_send_data_64bit_ireq *req_64bit;
+
+			smc_id = TZ_APP_QSAPP_SEND_DATA_ID;
+			desc.arginfo = TZ_APP_QSAPP_SEND_DATA_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_client_send_data_ireq *)
+					req_buf;
+				desc.args[0] = req->app_id;
+				desc.args[1] = req->req_ptr;
+				desc.args[2] = req->req_len;
+				desc.args[3] = req->rsp_ptr;
+				desc.args[4] = req->rsp_len;
+			} else {
+				req_64bit =
+				(struct qseecom_client_send_data_64bit_ireq *)
+					req_buf;
+				desc.args[0] = req_64bit->app_id;
+				desc.args[1] = req_64bit->req_ptr;
+				desc.args[2] = req_64bit->req_len;
+				desc.args[3] = req_64bit->rsp_ptr;
+				desc.args[4] = req_64bit->rsp_len;
+			}
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST: {
+			struct qseecom_client_send_data_ireq *req;
+			struct qseecom_client_send_data_64bit_ireq *req_64bit;
+
+			smc_id = TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID;
+			desc.arginfo =
+			TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_client_send_data_ireq *)
+					req_buf;
+				desc.args[0] = req->app_id;
+				desc.args[1] = req->req_ptr;
+				desc.args[2] = req->req_len;
+				desc.args[3] = req->rsp_ptr;
+				desc.args[4] = req->rsp_len;
+				desc.args[5] = req->sglistinfo_ptr;
+				desc.args[6] = req->sglistinfo_len;
+			} else {
+				req_64bit =
+				(struct qseecom_client_send_data_64bit_ireq *)
+					req_buf;
+				desc.args[0] = req_64bit->app_id;
+				desc.args[1] = req_64bit->req_ptr;
+				desc.args[2] = req_64bit->req_len;
+				desc.args[3] = req_64bit->rsp_ptr;
+				desc.args[4] = req_64bit->rsp_len;
+				desc.args[5] = req_64bit->sglistinfo_ptr;
+				desc.args[6] = req_64bit->sglistinfo_len;
+			}
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_RPMB_PROVISION_KEY_COMMAND: {
+			struct qseecom_client_send_service_ireq *req;
+
+			req = (struct qseecom_client_send_service_ireq *)
+				req_buf;
+			smc_id = TZ_OS_RPMB_PROVISION_KEY_ID;
+			desc.arginfo = TZ_OS_RPMB_PROVISION_KEY_ID_PARAM_ID;
+			desc.args[0] = req->key_type;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_RPMB_ERASE_COMMAND: {
+			smc_id = TZ_OS_RPMB_ERASE_ID;
+			desc.arginfo = TZ_OS_RPMB_ERASE_ID_PARAM_ID;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND: {
+			smc_id = TZ_OS_RPMB_CHECK_PROV_STATUS_ID;
+			desc.arginfo = TZ_OS_RPMB_CHECK_PROV_STATUS_ID_PARAM_ID;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_GENERATE_KEY: {
+			u32 tzbuflen = PAGE_ALIGN(sizeof
+				(struct qseecom_key_generate_ireq) -
+				sizeof(uint32_t));
+			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
+
+			if (!tzbuf)
+				return -ENOMEM;
+			memset(tzbuf, 0, tzbuflen);
+			memcpy(tzbuf, req_buf + sizeof(uint32_t),
+				(sizeof(struct qseecom_key_generate_ireq) -
+				sizeof(uint32_t)));
+			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
+			smc_id = TZ_OS_KS_GEN_KEY_ID;
+			desc.arginfo = TZ_OS_KS_GEN_KEY_ID_PARAM_ID;
+			desc.args[0] = virt_to_phys(tzbuf);
+			desc.args[1] = tzbuflen;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			kzfree(tzbuf);
+			break;
+		}
+		case QSEOS_DELETE_KEY: {
+			u32 tzbuflen = PAGE_ALIGN(sizeof
+				(struct qseecom_key_delete_ireq) -
+				sizeof(uint32_t));
+			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
+
+			if (!tzbuf)
+				return -ENOMEM;
+			memset(tzbuf, 0, tzbuflen);
+			memcpy(tzbuf, req_buf + sizeof(uint32_t),
+				(sizeof(struct qseecom_key_delete_ireq) -
+				sizeof(uint32_t)));
+			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
+			smc_id = TZ_OS_KS_DEL_KEY_ID;
+			desc.arginfo = TZ_OS_KS_DEL_KEY_ID_PARAM_ID;
+			desc.args[0] = virt_to_phys(tzbuf);
+			desc.args[1] = tzbuflen;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			kzfree(tzbuf);
+			break;
+		}
+		case QSEOS_SET_KEY: {
+			u32 tzbuflen = PAGE_ALIGN(sizeof
+				(struct qseecom_key_select_ireq) -
+				sizeof(uint32_t));
+			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
+
+			if (!tzbuf)
+				return -ENOMEM;
+			memset(tzbuf, 0, tzbuflen);
+			memcpy(tzbuf, req_buf + sizeof(uint32_t),
+				(sizeof(struct qseecom_key_select_ireq) -
+				sizeof(uint32_t)));
+			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
+			smc_id = TZ_OS_KS_SET_PIPE_KEY_ID;
+			desc.arginfo = TZ_OS_KS_SET_PIPE_KEY_ID_PARAM_ID;
+			desc.args[0] = virt_to_phys(tzbuf);
+			desc.args[1] = tzbuflen;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			kzfree(tzbuf);
+			break;
+		}
+		case QSEOS_UPDATE_KEY_USERINFO: {
+			u32 tzbuflen = PAGE_ALIGN(sizeof
+				(struct qseecom_key_userinfo_update_ireq) -
+				sizeof(uint32_t));
+			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
+
+			if (!tzbuf)
+				return -ENOMEM;
+			memset(tzbuf, 0, tzbuflen);
+			memcpy(tzbuf, req_buf + sizeof(uint32_t), (sizeof
+				(struct qseecom_key_userinfo_update_ireq) -
+				sizeof(uint32_t)));
+			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
+			smc_id = TZ_OS_KS_UPDATE_KEY_ID;
+			desc.arginfo = TZ_OS_KS_UPDATE_KEY_ID_PARAM_ID;
+			desc.args[0] = virt_to_phys(tzbuf);
+			desc.args[1] = tzbuflen;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			kzfree(tzbuf);
+			break;
+		}
+		case QSEOS_TEE_OPEN_SESSION: {
+			struct qseecom_qteec_ireq *req;
+			struct qseecom_qteec_64bit_ireq *req_64bit;
+
+			smc_id = TZ_APP_GPAPP_OPEN_SESSION_ID;
+			desc.arginfo = TZ_APP_GPAPP_OPEN_SESSION_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_qteec_ireq *)req_buf;
+				desc.args[0] = req->app_id;
+				desc.args[1] = req->req_ptr;
+				desc.args[2] = req->req_len;
+				desc.args[3] = req->resp_ptr;
+				desc.args[4] = req->resp_len;
+			} else {
+				req_64bit = (struct qseecom_qteec_64bit_ireq *)
+						req_buf;
+				desc.args[0] = req_64bit->app_id;
+				desc.args[1] = req_64bit->req_ptr;
+				desc.args[2] = req_64bit->req_len;
+				desc.args[3] = req_64bit->resp_ptr;
+				desc.args[4] = req_64bit->resp_len;
+			}
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_TEE_OPEN_SESSION_WHITELIST: {
+			struct qseecom_qteec_ireq *req;
+			struct qseecom_qteec_64bit_ireq *req_64bit;
+
+			smc_id = TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID;
+			desc.arginfo =
+			TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_qteec_ireq *)req_buf;
+				desc.args[0] = req->app_id;
+				desc.args[1] = req->req_ptr;
+				desc.args[2] = req->req_len;
+				desc.args[3] = req->resp_ptr;
+				desc.args[4] = req->resp_len;
+				desc.args[5] = req->sglistinfo_ptr;
+				desc.args[6] = req->sglistinfo_len;
+			} else {
+				req_64bit = (struct qseecom_qteec_64bit_ireq *)
+						req_buf;
+				desc.args[0] = req_64bit->app_id;
+				desc.args[1] = req_64bit->req_ptr;
+				desc.args[2] = req_64bit->req_len;
+				desc.args[3] = req_64bit->resp_ptr;
+				desc.args[4] = req_64bit->resp_len;
+				desc.args[5] = req_64bit->sglistinfo_ptr;
+				desc.args[6] = req_64bit->sglistinfo_len;
+			}
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_TEE_INVOKE_COMMAND: {
+			struct qseecom_qteec_ireq *req;
+			struct qseecom_qteec_64bit_ireq *req_64bit;
+
+			smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_ID;
+			desc.arginfo = TZ_APP_GPAPP_INVOKE_COMMAND_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_qteec_ireq *)req_buf;
+				desc.args[0] = req->app_id;
+				desc.args[1] = req->req_ptr;
+				desc.args[2] = req->req_len;
+				desc.args[3] = req->resp_ptr;
+				desc.args[4] = req->resp_len;
+			} else {
+				req_64bit = (struct qseecom_qteec_64bit_ireq *)
+						req_buf;
+				desc.args[0] = req_64bit->app_id;
+				desc.args[1] = req_64bit->req_ptr;
+				desc.args[2] = req_64bit->req_len;
+				desc.args[3] = req_64bit->resp_ptr;
+				desc.args[4] = req_64bit->resp_len;
+			}
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_TEE_INVOKE_COMMAND_WHITELIST: {
+			struct qseecom_qteec_ireq *req;
+			struct qseecom_qteec_64bit_ireq *req_64bit;
+
+			smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID;
+			desc.arginfo =
+			TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_qteec_ireq *)req_buf;
+				desc.args[0] = req->app_id;
+				desc.args[1] = req->req_ptr;
+				desc.args[2] = req->req_len;
+				desc.args[3] = req->resp_ptr;
+				desc.args[4] = req->resp_len;
+				desc.args[5] = req->sglistinfo_ptr;
+				desc.args[6] = req->sglistinfo_len;
+			} else {
+				req_64bit = (struct qseecom_qteec_64bit_ireq *)
+						req_buf;
+				desc.args[0] = req_64bit->app_id;
+				desc.args[1] = req_64bit->req_ptr;
+				desc.args[2] = req_64bit->req_len;
+				desc.args[3] = req_64bit->resp_ptr;
+				desc.args[4] = req_64bit->resp_len;
+				desc.args[5] = req_64bit->sglistinfo_ptr;
+				desc.args[6] = req_64bit->sglistinfo_len;
+			}
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_TEE_CLOSE_SESSION: {
+			struct qseecom_qteec_ireq *req;
+			struct qseecom_qteec_64bit_ireq *req_64bit;
+
+			smc_id = TZ_APP_GPAPP_CLOSE_SESSION_ID;
+			desc.arginfo = TZ_APP_GPAPP_CLOSE_SESSION_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_qteec_ireq *)req_buf;
+				desc.args[0] = req->app_id;
+				desc.args[1] = req->req_ptr;
+				desc.args[2] = req->req_len;
+				desc.args[3] = req->resp_ptr;
+				desc.args[4] = req->resp_len;
+			} else {
+				req_64bit = (struct qseecom_qteec_64bit_ireq *)
+						req_buf;
+				desc.args[0] = req_64bit->app_id;
+				desc.args[1] = req_64bit->req_ptr;
+				desc.args[2] = req_64bit->req_len;
+				desc.args[3] = req_64bit->resp_ptr;
+				desc.args[4] = req_64bit->resp_len;
+			}
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_TEE_REQUEST_CANCELLATION: {
+			struct qseecom_qteec_ireq *req;
+			struct qseecom_qteec_64bit_ireq *req_64bit;
+
+			smc_id = TZ_APP_GPAPP_REQUEST_CANCELLATION_ID;
+			desc.arginfo =
+				TZ_APP_GPAPP_REQUEST_CANCELLATION_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_qteec_ireq *)req_buf;
+				desc.args[0] = req->app_id;
+				desc.args[1] = req->req_ptr;
+				desc.args[2] = req->req_len;
+				desc.args[3] = req->resp_ptr;
+				desc.args[4] = req->resp_len;
+			} else {
+				req_64bit = (struct qseecom_qteec_64bit_ireq *)
+						req_buf;
+				desc.args[0] = req_64bit->app_id;
+				desc.args[1] = req_64bit->req_ptr;
+				desc.args[2] = req_64bit->req_len;
+				desc.args[3] = req_64bit->resp_ptr;
+				desc.args[4] = req_64bit->resp_len;
+			}
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_CONTINUE_BLOCKED_REQ_COMMAND: {
+			struct qseecom_continue_blocked_request_ireq *req =
+				(struct qseecom_continue_blocked_request_ireq *)
+				req_buf;
+			if (qseecom.smcinvoke_support)
+				smc_id =
+				TZ_OS_CONTINUE_BLOCKED_REQUEST_SMCINVOKE_ID;
+			else
+				smc_id = TZ_OS_CONTINUE_BLOCKED_REQUEST_ID;
+			desc.arginfo =
+				TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID;
+			desc.args[0] = req->app_or_session_id;
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		default: {
+			pr_err("qseos_cmd_id %d is not supported by armv8 scm_call2.\n",
+						qseos_cmd_id);
+			ret = -EINVAL;
+			break;
+		}
+		} /* end of switch (qseos_cmd_id) */
+		break;
+	} /* end of case SCM_SVC_TZSCHEDULER */
+	default: {
+		pr_err("svc_id 0x%x is not supported by armv8 scm_call2.\n",
+					svc_id);
+		ret = -EINVAL;
+		break;
+	}
+	} /* end of switch (svc_id) */
+	scm_resp->result = desc.ret[0];
+	scm_resp->resp_type = desc.ret[1];
+	scm_resp->data = desc.ret[2];
+	pr_debug("svc_id = 0x%x, tz_cmd_id = 0x%x, qseos_cmd_id = 0x%x, smc_id = 0x%x, param_id = 0x%x\n",
+		svc_id, tz_cmd_id, qseos_cmd_id, smc_id, desc.arginfo);
+	pr_debug("scm_resp->result = 0x%x, scm_resp->resp_type = 0x%x, scm_resp->data = 0x%x\n",
+		scm_resp->result, scm_resp->resp_type, scm_resp->data);
+	return ret;
+}
+
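+/*
+ * Thin wrapper used by the rest of the driver for TZ communication. On the
+ * ARMv8 SMC interface it simply forwards to qseecom_scm_call2(); cmd_len and
+ * resp_len are kept in the common prototype but are not used on this path.
+ */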
+static int qseecom_scm_call(u32 svc_id, u32 tz_cmd_id, const void *cmd_buf,
+		size_t cmd_len, void *resp_buf, size_t resp_len)
+{
+	return qseecom_scm_call2(svc_id, tz_cmd_id, cmd_buf, resp_buf);
+}
+
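+/*
+ * Look up a registered listener service by listener_id. Returns NULL if no
+ * matching entry exists. Callers are expected to serialize access to the
+ * registered listener list (listener_access_lock).
+ */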
+static struct qseecom_registered_listener_list *__qseecom_find_svc(
+						int32_t listener_id)
+{
+	struct qseecom_registered_listener_list *entry = NULL;
+
+	list_for_each_entry(entry,
+			&qseecom.registered_listener_list_head, list) {
+		if (entry->svc.listener_id == listener_id)
+			break;
+	}
+	if ((entry != NULL) && (entry->svc.listener_id != listener_id)) {
+		pr_debug("Service id: %u is not found\n", listener_id);
+		return NULL;
+	}
+
+	return entry;
+}
+
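+/*
+ * Perform cache maintenance on a dma-buf shared with QSEE: clean before
+ * handing the buffer to TZ, invalidate before the CPU reads results back.
+ * Buffers allocated without ION_FLAG_CACHED are left untouched.
+ */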
+static int qseecom_dmabuf_cache_operations(struct dma_buf *dmabuf,
+					enum qseecom_cache_ops cache_op)
+{
+	int ret = 0;
+	unsigned long flags = 0;
+
+	if (!dmabuf) {
+		pr_err("dmabuf is NULL\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	ret = dma_buf_get_flags(dmabuf, &flags);
+	if (ret) {
+		pr_err("Failed to get dma buf flags: %d\n", ret);
+		goto exit;
+	}
+	if (!(flags & ION_FLAG_CACHED))
+		goto exit;
+
+	switch (cache_op) {
+	case QSEECOM_CACHE_CLEAN:
+		dma_buf_begin_cpu_access(dmabuf, DMA_TO_DEVICE);
+		dma_buf_end_cpu_access(dmabuf, DMA_TO_DEVICE);
+		break;
+	case QSEECOM_CACHE_INVALIDATE:
+		dma_buf_begin_cpu_access(dmabuf, DMA_TO_DEVICE);
+		dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
+		break;
+	default:
+		pr_err("cache (%d) operation not supported\n",
+			 cache_op);
+		ret = -EINVAL;
+		goto exit;
+	}
+exit:
+	return ret;
+}
+
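+/*
+ * Import an ion fd as a dma-buf and map it for device access. On success the
+ * returned sgt/attach/dmabuf triplet must be released with
+ * qseecom_dmabuf_unmap().
+ */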
+static int qseecom_dmabuf_map(int ion_fd, struct sg_table **sgt,
+				struct dma_buf_attachment **attach,
+				struct dma_buf **dmabuf)
+{
+	struct dma_buf *new_dma_buf = NULL;
+	struct dma_buf_attachment *new_attach = NULL;
+	struct sg_table *new_sgt = NULL;
+	int ret = 0;
+
+	new_dma_buf = dma_buf_get(ion_fd);
+	if (IS_ERR_OR_NULL(new_dma_buf)) {
+		pr_err("dma_buf_get() for ion_fd %d failed\n", ion_fd);
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	new_attach = dma_buf_attach(new_dma_buf, qseecom.dev);
+	if (IS_ERR_OR_NULL(new_attach)) {
+		pr_err("dma_buf_attach() for ion_fd %d failed\n", ion_fd);
+		ret = -ENOMEM;
+		goto err_put;
+	}
+
+	new_sgt = dma_buf_map_attachment(new_attach, DMA_BIDIRECTIONAL);
+	if (IS_ERR_OR_NULL(new_sgt)) {
+		ret = PTR_ERR(new_sgt);
+		pr_err("dma_buf_map_attachment for ion_fd %d failed ret = %d\n",
+				ion_fd, ret);
+		goto err_detach;
+	}
+	*sgt = new_sgt;
+	*attach = new_attach;
+	*dmabuf = new_dma_buf;
+	return ret;
+
+err_detach:
+	dma_buf_detach(new_dma_buf, new_attach);
+err_put:
+	dma_buf_put(new_dma_buf);
+err:
+	return ret;
+}
+
+static void qseecom_dmabuf_unmap(struct sg_table *sgt,
+			struct dma_buf_attachment *attach,
+			struct dma_buf *dmabuf)
+{
+	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+	dma_buf_detach(dmabuf, attach);
+	dma_buf_put(dmabuf);
+}
+
+/* convert ion_fd to phys_addr and virt_addr */
+static int qseecom_vaddr_map(int ion_fd,
+			phys_addr_t *paddr, void **vaddr,
+			struct sg_table **sgt,
+			struct dma_buf_attachment **attach,
+			size_t *sb_length, struct dma_buf **dmabuf)
+{
+	struct dma_buf *new_dma_buf = NULL;
+	struct dma_buf_attachment *new_attach = NULL;
+	struct sg_table *new_sgt = NULL;
+	void *new_va = NULL;
+	int ret = 0;
+
+	ret = qseecom_dmabuf_map(ion_fd, &new_sgt, &new_attach, &new_dma_buf);
+	if (ret) {
+		pr_err("qseecom_dmabuf_map for ion_fd %d failed ret = %d\n",
+				ion_fd, ret);
+		goto err;
+	}
+
+	*paddr = sg_dma_address(new_sgt->sgl);
+	*sb_length = new_sgt->sgl->length;
+
+	dma_buf_begin_cpu_access(new_dma_buf, DMA_BIDIRECTIONAL);
+	new_va = dma_buf_kmap(new_dma_buf, 0);
+	if (IS_ERR_OR_NULL(new_va)) {
+		pr_err("dma_buf_kmap failed\n");
+		ret = -ENOMEM;
+		goto err_unmap;
+	}
+	*dmabuf = new_dma_buf;
+	*attach = new_attach;
+	*sgt = new_sgt;
+	*vaddr = new_va;
+	return ret;
+
+err_unmap:
+	dma_buf_end_cpu_access(new_dma_buf, DMA_BIDIRECTIONAL);
+	qseecom_dmabuf_unmap(new_sgt, new_attach, new_dma_buf);
+err:
+	return ret;
+}
+
+static void qseecom_vaddr_unmap(void *vaddr, struct sg_table *sgt,
+		struct dma_buf_attachment *attach,
+		struct dma_buf *dmabuf)
+{
+	dma_buf_kunmap(dmabuf, 0, vaddr);
+	dma_buf_end_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
+	qseecom_dmabuf_unmap(sgt, attach, dmabuf);
+}
+
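+/*
+ * Map the listener's shared buffer (ion fd) and register it with QSEE via a
+ * QSEOS_REGISTER_LISTENER request, choosing the 32-bit or 64-bit request
+ * layout based on the QSEE version.
+ */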
+static int __qseecom_set_sb_memory(struct qseecom_registered_listener_list *svc,
+				struct qseecom_dev_handle *handle,
+				struct qseecom_register_listener_req *listener)
+{
+	int ret = 0;
+	struct qseecom_register_listener_ireq req;
+	struct qseecom_register_listener_64bit_ireq req_64bit;
+	struct qseecom_command_scm_resp resp;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+
+	ret = qseecom_vaddr_map(listener->ifd_data_fd,
+				&svc->sb_phys, (void **)&svc->sb_virt,
+				&svc->sgt, &svc->attach,
+				&svc->sb_length, &svc->dmabuf);
+	if (ret) {
+		pr_err("failed to convert ion_fd %d for lsnr %d with err: %d\n",
+			listener->ifd_data_fd, svc->svc.listener_id, ret);
+		return -EINVAL;
+	}
+
+	if (qseecom.qsee_version < QSEE_VERSION_40) {
+		req.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
+		req.listener_id = svc->svc.listener_id;
+		req.sb_len = svc->sb_length;
+		req.sb_ptr = (uint32_t)svc->sb_phys;
+		cmd_buf = (void *)&req;
+		cmd_len = sizeof(struct qseecom_register_listener_ireq);
+	} else {
+		req_64bit.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
+		req_64bit.listener_id = svc->svc.listener_id;
+		req_64bit.sb_len = svc->sb_length;
+		req_64bit.sb_ptr = (uint64_t)svc->sb_phys;
+		cmd_buf = (void *)&req_64bit;
+		cmd_len = sizeof(struct qseecom_register_listener_64bit_ireq);
+	}
+
+	resp.result = QSEOS_RESULT_INCOMPLETE;
+
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
+					 &resp, sizeof(resp));
+	if (ret) {
+		pr_err("qseecom_scm_call failed with err: %d\n", ret);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	if (resp.result != QSEOS_RESULT_SUCCESS) {
+		pr_err("Error SB registration req: resp.result = %d\n",
+			resp.result);
+		ret = -EPERM;
+		goto err;
+	}
+	return 0;
+err:
+	if (svc->dmabuf)
+		qseecom_vaddr_unmap(svc->sb_virt, svc->sgt, svc->attach,
+			svc->dmabuf);
+	return ret;
+}
+
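+/*
+ * Handle the register-listener ioctl. A duplicate active listener id is
+ * rejected with -EBUSY; if the same id is still pending unregistration, wait
+ * for pending unregistrations to drain before creating and registering the
+ * new listener entry.
+ */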
+static int qseecom_register_listener(struct qseecom_dev_handle *data,
+					void __user *argp)
+{
+	int ret = 0;
+	struct qseecom_register_listener_req rcvd_lstnr;
+	struct qseecom_registered_listener_list *new_entry;
+	struct qseecom_registered_listener_list *ptr_svc;
+
+	ret = copy_from_user(&rcvd_lstnr, argp, sizeof(rcvd_lstnr));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+	if (!access_ok(VERIFY_WRITE, (void __user *)rcvd_lstnr.virt_sb_base,
+			rcvd_lstnr.sb_size))
+		return -EFAULT;
+
+	data->listener.id = rcvd_lstnr.listener_id;
+
+	ptr_svc = __qseecom_find_svc(rcvd_lstnr.listener_id);
+	if (ptr_svc) {
+		if (!ptr_svc->unregister_pending) {
+			pr_err("Service %d is not unique\n",
+				rcvd_lstnr.listener_id);
+			data->released = true;
+			return -EBUSY;
+		} else {
+			/* wait until listener is unregistered */
+			pr_debug("register %d has to wait\n",
+				rcvd_lstnr.listener_id);
+			mutex_unlock(&listener_access_lock);
+			ret = wait_event_freezable(
+				qseecom.register_lsnr_pending_wq,
+				list_empty(
+				&qseecom.unregister_lsnr_pending_list_head));
+			if (ret) {
+				pr_err("interrupted register_pending_wq %d\n",
+						rcvd_lstnr.listener_id);
+				mutex_lock(&listener_access_lock);
+				return -ERESTARTSYS;
+			}
+			mutex_lock(&listener_access_lock);
+		}
+	}
+	new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
+	if (!new_entry)
+		return -ENOMEM;
+	memcpy(&new_entry->svc, &rcvd_lstnr, sizeof(rcvd_lstnr));
+	new_entry->rcv_req_flag = 0;
+
+	new_entry->svc.listener_id = rcvd_lstnr.listener_id;
+	new_entry->sb_length = rcvd_lstnr.sb_size;
+	new_entry->user_virt_sb_base = rcvd_lstnr.virt_sb_base;
+	if (__qseecom_set_sb_memory(new_entry, data, &rcvd_lstnr)) {
+		pr_err("qseecom_set_sb_memory failed for listener %d, size %d\n",
+				rcvd_lstnr.listener_id, rcvd_lstnr.sb_size);
+		kzfree(new_entry);
+		return -ENOMEM;
+	}
+
+	init_waitqueue_head(&new_entry->rcv_req_wq);
+	init_waitqueue_head(&new_entry->listener_block_app_wq);
+	new_entry->send_resp_flag = 0;
+	new_entry->listener_in_use = false;
+	list_add_tail(&new_entry->list, &qseecom.registered_listener_list_head);
+
+	pr_debug("Service %d is registered\n", rcvd_lstnr.listener_id);
+	return ret;
+}
+
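+/*
+ * Deregister a listener with QSEE, abort any client threads still waiting on
+ * it, then unmap its shared buffer and free the list entry. Returns -EBUSY
+ * without freeing anything if TZ cannot process the request yet.
+ */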
+static int __qseecom_unregister_listener(struct qseecom_dev_handle *data,
+			struct qseecom_registered_listener_list *ptr_svc)
+{
+	int ret = 0;
+	struct qseecom_register_listener_ireq req;
+	struct qseecom_command_scm_resp resp;
+
+	req.qsee_cmd_id = QSEOS_DEREGISTER_LISTENER;
+	req.listener_id = data->listener.id;
+	resp.result = QSEOS_RESULT_INCOMPLETE;
+
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
+					sizeof(req), &resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call() failed with err: %d (lstnr id=%d)\n",
+				ret, data->listener.id);
+		if (ret == -EBUSY)
+			return ret;
+		goto exit;
+	}
+
+	if (resp.result != QSEOS_RESULT_SUCCESS) {
+		pr_err("Failed resp.result=%d,(lstnr id=%d)\n",
+				resp.result, data->listener.id);
+		ret = -EPERM;
+		goto exit;
+	}
+
+	data->abort = 1;
+	wake_up_all(&ptr_svc->rcv_req_wq);
+
+	while (atomic_read(&data->ioctl_count) > 1) {
+		if (wait_event_freezable(data->abort_wq,
+				atomic_read(&data->ioctl_count) <= 1)) {
+			pr_err("Interrupted from abort\n");
+			ret = -ERESTARTSYS;
+		}
+	}
+
+exit:
+	if (ptr_svc->dmabuf)
+		qseecom_vaddr_unmap(ptr_svc->sb_virt,
+			ptr_svc->sgt, ptr_svc->attach, ptr_svc->dmabuf);
+
+	list_del(&ptr_svc->list);
+	kzfree(ptr_svc);
+
+	data->released = true;
+	pr_debug("Service %d is unregistered\n", data->listener.id);
+	return ret;
+}
+
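+/*
+ * Unregistration is deferred: mark the listener as aborted, wake any client
+ * waiting for its response, and queue the request on the pending list. The
+ * actual TZ deregistration happens later in
+ * __qseecom_processing_pending_lsnr_unregister().
+ */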
+static int qseecom_unregister_listener(struct qseecom_dev_handle *data)
+{
+	struct qseecom_registered_listener_list *ptr_svc = NULL;
+	struct qseecom_unregister_pending_list *entry = NULL;
+
+	ptr_svc = __qseecom_find_svc(data->listener.id);
+	if (!ptr_svc) {
+		pr_err("Unregister invalid listener ID %d\n", data->listener.id);
+		return -ENODATA;
+	}
+	/* stop CA thread waiting for listener response */
+	ptr_svc->abort = 1;
+	wake_up_interruptible_all(&qseecom.send_resp_wq);
+
+	/* return directly if pending */
+	if (ptr_svc->unregister_pending)
+		return 0;
+
+	/* add unregistration into pending list */
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		return -ENOMEM;
+	entry->data = data;
+	list_add_tail(&entry->list,
+		&qseecom.unregister_lsnr_pending_list_head);
+	ptr_svc->unregister_pending = true;
+	pr_debug("unregister %d pending\n", data->listener.id);
+	return 0;
+}
+
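+/*
+ * Drain the pending-unregister list and complete each deferred listener
+ * unregistration. If TZ returns -EBUSY the entry is kept for a later retry;
+ * otherwise waiters blocked in qseecom_register_listener() are woken up.
+ */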
+static void __qseecom_processing_pending_lsnr_unregister(void)
+{
+	struct qseecom_unregister_pending_list *entry = NULL;
+	struct qseecom_registered_listener_list *ptr_svc = NULL;
+	struct list_head *pos;
+	int ret = 0;
+
+	mutex_lock(&listener_access_lock);
+	while (!list_empty(&qseecom.unregister_lsnr_pending_list_head)) {
+		pos = qseecom.unregister_lsnr_pending_list_head.next;
+		entry = list_entry(pos,
+				struct qseecom_unregister_pending_list, list);
+		if (entry && entry->data) {
+			pr_debug("process pending unregister %d\n",
+					entry->data->listener.id);
+			ptr_svc = __qseecom_find_svc(
+						entry->data->listener.id);
+			if (ptr_svc) {
+				ret = __qseecom_unregister_listener(
+						entry->data, ptr_svc);
+				if (ret == -EBUSY) {
+					pr_debug("unregister %d pending again\n",
+						entry->data->listener.id);
+					mutex_unlock(&listener_access_lock);
+					return;
+				}
+			} else
+				pr_err("invalid listener %d\n",
+					entry->data->listener.id);
+			kzfree(entry->data);
+		}
+		list_del(pos);
+		kzfree(entry);
+	}
+	mutex_unlock(&listener_access_lock);
+	wake_up_interruptible(&qseecom.register_lsnr_pending_wq);
+}
+
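+/*
+ * Apply a bus bandwidth vote for the requested mode and keep the QSEE core
+ * clock vote in sync with it: the clock is disabled when going INACTIVE and
+ * enabled for any active mode, with best-effort rollback if the bus request
+ * fails.
+ */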
+static int __qseecom_set_msm_bus_request(uint32_t mode)
+{
+	int ret = 0;
+	struct qseecom_clk *qclk;
+
+	qclk = &qseecom.qsee;
+	if (qclk->ce_core_src_clk != NULL) {
+		if (mode == INACTIVE) {
+			__qseecom_disable_clk(CLK_QSEE);
+		} else {
+			ret = __qseecom_enable_clk(CLK_QSEE);
+			if (ret)
+				pr_err("CLK enabling failed (%d) MODE (%d)\n",
+							ret, mode);
+		}
+	}
+
+	if ((!ret) && (qseecom.current_mode != mode)) {
+		ret = msm_bus_scale_client_update_request(
+					qseecom.qsee_perf_client, mode);
+		if (ret) {
+			pr_err("Bandwidth req failed(%d) MODE (%d)\n",
+							ret, mode);
+			if (qclk->ce_core_src_clk != NULL) {
+				if (mode == INACTIVE) {
+					ret = __qseecom_enable_clk(CLK_QSEE);
+					if (ret)
+						pr_err("CLK enable failed\n");
+				} else
+					__qseecom_disable_clk(CLK_QSEE);
+			}
+		}
+		qseecom.current_mode = mode;
+	}
+	return ret;
+}
+
+static void qseecom_bw_inactive_req_work(struct work_struct *work)
+{
+	mutex_lock(&app_access_lock);
+	mutex_lock(&qsee_bw_mutex);
+	if (qseecom.timer_running)
+		__qseecom_set_msm_bus_request(INACTIVE);
+	pr_debug("current_mode = %d, cumulative_mode = %d\n",
+				qseecom.current_mode, qseecom.cumulative_mode);
+	qseecom.timer_running = false;
+	mutex_unlock(&qsee_bw_mutex);
+	mutex_unlock(&app_access_lock);
+}
+
+/*static void qseecom_scale_bus_bandwidth_timer_callback(unsigned long data)
+ *{
+ *	schedule_work(&qseecom.bw_inactive_req_ws);
+ *}
+ */
+
+static int __qseecom_decrease_clk_ref_count(enum qseecom_ce_hw_instance ce)
+{
+	struct qseecom_clk *qclk;
+	int ret = 0;
+
+	mutex_lock(&clk_access_lock);
+	if (ce == CLK_QSEE)
+		qclk = &qseecom.qsee;
+	else
+		qclk = &qseecom.ce_drv;
+
+	if (qclk->clk_access_cnt > 2) {
+		pr_err("Invalid clock ref count %d\n", qclk->clk_access_cnt);
+		ret = -EINVAL;
+		goto err_dec_ref_cnt;
+	}
+	if (qclk->clk_access_cnt == 2)
+		qclk->clk_access_cnt--;
+
+err_dec_ref_cnt:
+	mutex_unlock(&clk_access_lock);
+	return ret;
+}
+
+
+static int qseecom_scale_bus_bandwidth_timer(uint32_t mode)
+{
+	int32_t ret = 0;
+	int32_t request_mode = INACTIVE;
+
+	mutex_lock(&qsee_bw_mutex);
+	if (mode == 0) {
+		if (qseecom.cumulative_mode > MEDIUM)
+			request_mode = HIGH;
+		else
+			request_mode = qseecom.cumulative_mode;
+	} else {
+		request_mode = mode;
+	}
+
+	ret = __qseecom_set_msm_bus_request(request_mode);
+	if (ret) {
+		pr_err("set msm bus request failed (%d),request_mode (%d)\n",
+			ret, request_mode);
+		goto err_scale_timer;
+	}
+
+	if (qseecom.timer_running) {
+		ret = __qseecom_decrease_clk_ref_count(CLK_QSEE);
+		if (ret) {
+			pr_err("Failed to decrease clk ref count.\n");
+			goto err_scale_timer;
+		}
+		del_timer_sync(&(qseecom.bw_scale_down_timer));
+		qseecom.timer_running = false;
+	}
+err_scale_timer:
+	mutex_unlock(&qsee_bw_mutex);
+	return ret;
+}
+
+
+static int qseecom_unregister_bus_bandwidth_needs(
+					struct qseecom_dev_handle *data)
+{
+	qseecom.cumulative_mode -= data->mode;
+	data->mode = INACTIVE;
+
+	return 0;
+}
+
+static int __qseecom_register_bus_bandwidth_needs(
+			struct qseecom_dev_handle *data, uint32_t request_mode)
+{
+	if (data->mode == INACTIVE) {
+		qseecom.cumulative_mode += request_mode;
+		data->mode = request_mode;
+	} else {
+		if (data->mode != request_mode) {
+			qseecom.cumulative_mode -= data->mode;
+			qseecom.cumulative_mode += request_mode;
+			data->mode = request_mode;
+		}
+	}
+	return 0;
+}
+
+static int qseecom_perf_enable(struct qseecom_dev_handle *data)
+{
+	int ret = 0;
+
+	ret = qsee_vote_for_clock(data, CLK_DFAB);
+	if (ret) {
+		pr_err("Failed to vote for DFAB clock with err %d\n", ret);
+		goto perf_enable_exit;
+	}
+	ret = qsee_vote_for_clock(data, CLK_SFPB);
+	if (ret) {
+		qsee_disable_clock_vote(data, CLK_DFAB);
+		pr_err("Failed to vote for SFPB clock with err %d\n", ret);
+		goto perf_enable_exit;
+	}
+
+perf_enable_exit:
+	return ret;
+}
+
+static int qseecom_scale_bus_bandwidth(struct qseecom_dev_handle *data,
+						void __user *argp)
+{
+	int32_t ret = 0;
+	int32_t req_mode;
+
+	if (qseecom.no_clock_support)
+		return 0;
+
+	ret = copy_from_user(&req_mode, argp, sizeof(req_mode));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+	if (req_mode > HIGH) {
+		pr_err("Invalid bandwidth mode (%d)\n", req_mode);
+		return -EINVAL;
+	}
+
+	/*
+	 * Register bus bandwidth needs if bus scaling feature is enabled;
+	 * otherwise, qseecom enables/disables clocks for the client directly.
+	 */
+	if (qseecom.support_bus_scaling) {
+		mutex_lock(&qsee_bw_mutex);
+		ret = __qseecom_register_bus_bandwidth_needs(data, req_mode);
+		mutex_unlock(&qsee_bw_mutex);
+	} else {
+		pr_debug("Bus scaling feature is NOT enabled\n");
+		pr_debug("request bandwidth mode %d for the client\n",
+				req_mode);
+		if (req_mode != INACTIVE) {
+			ret = qseecom_perf_enable(data);
+			if (ret)
+				pr_err("Failed to vote for clock with err %d\n",
+						ret);
+		} else {
+			qsee_disable_clock_vote(data, CLK_DFAB);
+			qsee_disable_clock_vote(data, CLK_SFPB);
+		}
+	}
+	return ret;
+}
+
+static void __qseecom_add_bw_scale_down_timer(uint32_t duration)
+{
+	if (qseecom.no_clock_support)
+		return;
+
+	mutex_lock(&qsee_bw_mutex);
+	qseecom.bw_scale_down_timer.expires = jiffies +
+		msecs_to_jiffies(duration);
+	mod_timer(&(qseecom.bw_scale_down_timer),
+		qseecom.bw_scale_down_timer.expires);
+	qseecom.timer_running = true;
+	mutex_unlock(&qsee_bw_mutex);
+}
+
+static void __qseecom_disable_clk_scale_down(struct qseecom_dev_handle *data)
+{
+	if (!qseecom.support_bus_scaling)
+		qsee_disable_clock_vote(data, CLK_SFPB);
+	else
+		__qseecom_add_bw_scale_down_timer(
+			QSEECOM_LOAD_APP_CRYPTO_TIMEOUT);
+}
+
+static int __qseecom_enable_clk_scale_up(struct qseecom_dev_handle *data)
+{
+	int ret = 0;
+
+	if (qseecom.support_bus_scaling) {
+		ret = qseecom_scale_bus_bandwidth_timer(MEDIUM);
+		if (ret)
+			pr_err("Failed to set bw MEDIUM.\n");
+	} else {
+		ret = qsee_vote_for_clock(data, CLK_SFPB);
+		if (ret)
+			pr_err("Fail vote for clk SFPB ret %d\n", ret);
+	}
+	return ret;
+}
+
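+/*
+ * Handle the set-shared-buffer ioctl for a client: validate the request, map
+ * the supplied ion fd into kernel/physical space, and record the buffer in
+ * the client handle for later command exchanges with its TA.
+ */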
+static int qseecom_set_client_mem_param(struct qseecom_dev_handle *data,
+						void __user *argp)
+{
+	int32_t ret;
+	struct qseecom_set_sb_mem_param_req req;
+	size_t len;
+
+	/* Copy the shared buffer setup request from userspace */
+	if (copy_from_user(&req, (void __user *)argp, sizeof(req)))
+		return -EFAULT;
+
+	if ((req.ifd_data_fd <= 0) || (req.virt_sb_base == NULL) ||
+					(req.sb_len == 0)) {
+		pr_err("Invalid input(s): ion_fd(%d), sb_len(%d), vaddr(0x%pK)\n",
+			req.ifd_data_fd, req.sb_len, req.virt_sb_base);
+		return -EFAULT;
+	}
+	if (!access_ok(VERIFY_WRITE, (void __user *)req.virt_sb_base,
+			req.sb_len))
+		return -EFAULT;
+
+	ret = qseecom_vaddr_map(req.ifd_data_fd, &data->client.sb_phys,
+				(void **)&data->client.sb_virt,
+				 &data->client.sgt, &data->client.attach,
+				&len, &data->client.dmabuf);
+	if (ret) {
+		pr_err("failed to convert ion_fd %d for app %d with err: %d\n",
+			req.ifd_data_fd, data->client.app_id, ret);
+		return -EINVAL;
+	}
+
+	if (len < req.sb_len) {
+		pr_err("Requested length (0x%x) is > allocated (%zu)\n",
+			req.sb_len, len);
+		ret = -EINVAL;
+		goto exit;
+	}
+	data->client.sb_length = req.sb_len;
+	data->client.user_virt_sb_base = (uintptr_t)req.virt_sb_base;
+
+	return ret;
+exit:
+	if (data->client.dmabuf)
+		qseecom_vaddr_unmap(data->client.sb_virt, data->client.sgt,
+			 data->client.attach, data->client.dmabuf);
+	return ret;
+}
+
+static int __qseecom_listener_has_sent_rsp(struct qseecom_dev_handle *data,
+			struct qseecom_registered_listener_list *ptr_svc)
+{
+	int ret;
+
+	ret = (qseecom.send_resp_flag != 0);
+	return ret || data->abort || ptr_svc->abort;
+}
+
+static int __qseecom_reentrancy_listener_has_sent_rsp(
+			struct qseecom_dev_handle *data,
+			struct qseecom_registered_listener_list *ptr_svc)
+{
+	int ret;
+
+	ret = (ptr_svc->send_resp_flag != 0);
+	return ret || data->abort || ptr_svc->abort;
+}
+
+static void __qseecom_clean_listener_sglistinfo(
+			struct qseecom_registered_listener_list *ptr_svc)
+{
+	if (ptr_svc->sglist_cnt) {
+		memset(ptr_svc->sglistinfo_ptr, 0,
+			SGLISTINFO_TABLE_SIZE);
+		ptr_svc->sglist_cnt = 0;
+	}
+}
+
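+/*
+ * Service a QSEOS_RESULT_INCOMPLETE response: QSEE is asking for a listener.
+ * Wake the addressed listener, block (with signals masked) until it has sent
+ * a response, then return that response to TZ, repeating until TZ reports a
+ * final result. RPMB/SSD listeners additionally need the QSEE clock enabled
+ * around the scm call.
+ */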
+static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
+					struct qseecom_command_scm_resp *resp)
+{
+	int ret = 0;
+	int rc = 0;
+	uint32_t lstnr;
+	struct qseecom_client_listener_data_irsp send_data_rsp = {0};
+	struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit
+									= {0};
+	struct qseecom_registered_listener_list *ptr_svc = NULL;
+	sigset_t new_sigset;
+	sigset_t old_sigset;
+	uint32_t status;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+	struct sglist_info *table = NULL;
+
+	qseecom.app_block_ref_cnt++;
+	while (resp->result == QSEOS_RESULT_INCOMPLETE) {
+		lstnr = resp->data;
+		/*
+		 * Wake up the blocking listener service with the lstnr id
+		 */
+		mutex_lock(&listener_access_lock);
+		list_for_each_entry(ptr_svc,
+				&qseecom.registered_listener_list_head, list) {
+			if (ptr_svc->svc.listener_id == lstnr) {
+				ptr_svc->listener_in_use = true;
+				ptr_svc->rcv_req_flag = 1;
+				wake_up_interruptible(&ptr_svc->rcv_req_wq);
+				break;
+			}
+		}
+
+		if (ptr_svc == NULL) {
+			pr_err("Listener Svc %d does not exist\n", lstnr);
+			rc = -EINVAL;
+			status = QSEOS_RESULT_FAILURE;
+			goto err_resp;
+		}
+
+		if (!ptr_svc->dmabuf) {
+			pr_err("Client dmabuf is not initialized\n");
+			rc = -EINVAL;
+			status = QSEOS_RESULT_FAILURE;
+			goto err_resp;
+		}
+
+		if (ptr_svc->svc.listener_id != lstnr) {
+			pr_err("Service %d does not exist\n",
+						lstnr);
+			rc = -ERESTARTSYS;
+			ptr_svc = NULL;
+			status = QSEOS_RESULT_FAILURE;
+			goto err_resp;
+		}
+
+		if (ptr_svc->abort == 1) {
+			pr_debug("Service %d abort %d\n",
+						lstnr, ptr_svc->abort);
+			rc = -ENODEV;
+			status = QSEOS_RESULT_FAILURE;
+			goto err_resp;
+		}
+
+		pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
+
+		/* initialize the new signal mask with all signals*/
+		sigfillset(&new_sigset);
+		/* block all signals */
+		sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
+
+		mutex_unlock(&listener_access_lock);
+		do {
+			/*
+			 * When reentrancy is not supported, check global
+			 * send_resp_flag; otherwise, check this listener's
+			 * send_resp_flag.
+			 */
+			if (!qseecom.qsee_reentrancy_support &&
+				!wait_event_freezable(qseecom.send_resp_wq,
+				__qseecom_listener_has_sent_rsp(
+						data, ptr_svc))) {
+				break;
+			}
+
+			if (qseecom.qsee_reentrancy_support &&
+				!wait_event_freezable(qseecom.send_resp_wq,
+				__qseecom_reentrancy_listener_has_sent_rsp(
+						data, ptr_svc))) {
+				break;
+			}
+		} while (1);
+		mutex_lock(&listener_access_lock);
+		/* restore signal mask */
+		sigprocmask(SIG_SETMASK, &old_sigset, NULL);
+		if (data->abort || ptr_svc->abort) {
+			pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d\n",
+				data->client.app_id, lstnr, ret);
+			rc = -ENODEV;
+			status = QSEOS_RESULT_FAILURE;
+		} else {
+			status = QSEOS_RESULT_SUCCESS;
+		}
+err_resp:
+		qseecom.send_resp_flag = 0;
+		if (ptr_svc) {
+			ptr_svc->send_resp_flag = 0;
+			table = ptr_svc->sglistinfo_ptr;
+		}
+		if (qseecom.qsee_version < QSEE_VERSION_40) {
+			send_data_rsp.listener_id  = lstnr;
+			send_data_rsp.status = status;
+			if (table) {
+				send_data_rsp.sglistinfo_ptr =
+					(uint32_t)virt_to_phys(table);
+				send_data_rsp.sglistinfo_len =
+					SGLISTINFO_TABLE_SIZE;
+				dmac_flush_range((void *)table,
+					(void *)table + SGLISTINFO_TABLE_SIZE);
+			}
+			cmd_buf = (void *)&send_data_rsp;
+			cmd_len = sizeof(send_data_rsp);
+		} else {
+			send_data_rsp_64bit.listener_id  = lstnr;
+			send_data_rsp_64bit.status = status;
+			if (table) {
+				send_data_rsp_64bit.sglistinfo_ptr =
+					virt_to_phys(table);
+				send_data_rsp_64bit.sglistinfo_len =
+					SGLISTINFO_TABLE_SIZE;
+				dmac_flush_range((void *)table,
+					(void *)table + SGLISTINFO_TABLE_SIZE);
+			}
+			cmd_buf = (void *)&send_data_rsp_64bit;
+			cmd_len = sizeof(send_data_rsp_64bit);
+		}
+		if (!qseecom.whitelist_support || table == NULL)
+			*(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
+		else
+			*(uint32_t *)cmd_buf =
+				QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
+
+		if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE)) {
+			ret = __qseecom_enable_clk(CLK_QSEE);
+			if (ret)
+				goto exit;
+		}
+
+		if (ptr_svc) {
+			ret = qseecom_dmabuf_cache_operations(ptr_svc->dmabuf,
+							QSEECOM_CACHE_CLEAN);
+			if (ret)
+				goto exit;
+
+			ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+					cmd_buf, cmd_len, resp, sizeof(*resp));
+			ptr_svc->listener_in_use = false;
+			__qseecom_clean_listener_sglistinfo(ptr_svc);
+
+			if (ret) {
+				pr_err("scm_call() failed with err: %d (app_id = %d)\n",
+					ret, data->client.app_id);
+				goto exit;
+			}
+
+			ret = qseecom_dmabuf_cache_operations(ptr_svc->dmabuf,
+						QSEECOM_CACHE_INVALIDATE);
+			if (ret)
+				goto exit;
+		} else {
+			ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+					cmd_buf, cmd_len, resp, sizeof(*resp));
+			if (ret) {
+				pr_err("scm_call() failed with err: %d (app_id = %d)\n",
+					ret, data->client.app_id);
+				goto exit;
+			}
+		}
+
+		pr_debug("resp status %d, res= %d, app_id = %d, lstr = %d\n",
+			status, resp->result, data->client.app_id, lstnr);
+		if ((resp->result != QSEOS_RESULT_SUCCESS) &&
+			(resp->result != QSEOS_RESULT_INCOMPLETE)) {
+			pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
+				resp->result, data->client.app_id, lstnr);
+			ret = -EINVAL;
+		}
+exit:
+		mutex_unlock(&listener_access_lock);
+		if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
+			__qseecom_disable_clk(CLK_QSEE);
+
+	}
+	qseecom.app_block_ref_cnt--;
+	if (rc)
+		return rc;
+
+	return ret;
+}
+
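+/*
+ * Called when TZ reports QSEOS_RESULT_BLOCKED_ON_LISTENER: the app (or
+ * session, when smcinvoke is supported) is blocked because the listener it
+ * needs is busy. Sleep until the listener is free, then issue
+ * QSEOS_CONTINUE_BLOCKED_REQ_COMMAND so TZ can resume the blocked request.
+ */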
+static int __qseecom_process_reentrancy_blocked_on_listener(
+				struct qseecom_command_scm_resp *resp,
+				struct qseecom_registered_app_list *ptr_app,
+				struct qseecom_dev_handle *data)
+{
+	struct qseecom_registered_listener_list *list_ptr;
+	int ret = 0;
+	struct qseecom_continue_blocked_request_ireq ireq;
+	struct qseecom_command_scm_resp continue_resp;
+	unsigned int session_id;
+	sigset_t new_sigset;
+	sigset_t old_sigset;
+	unsigned long flags;
+	bool found_app = false;
+
+	if (!resp || !data) {
+		pr_err("invalid resp or data pointer\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	/* find app_id & img_name from list */
+	if (!ptr_app) {
+		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+		list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
+							list) {
+			if ((ptr_app->app_id == data->client.app_id) &&
+				(!strcmp(ptr_app->app_name,
+						data->client.app_name))) {
+				found_app = true;
+				break;
+			}
+		}
+		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
+					flags);
+		if (!found_app) {
+			pr_err("app_id %d (%s) is not found\n",
+				data->client.app_id,
+				(char *)data->client.app_name);
+			ret = -ENOENT;
+			goto exit;
+		}
+	}
+
+	do {
+		session_id = resp->resp_type;
+		mutex_lock(&listener_access_lock);
+		list_ptr = __qseecom_find_svc(resp->data);
+		if (!list_ptr) {
+			pr_err("Invalid listener ID %d\n", resp->data);
+			ret = -ENODATA;
+			mutex_unlock(&listener_access_lock);
+			goto exit;
+		}
+		ptr_app->blocked_on_listener_id = resp->data;
+
+		pr_warn("Lsntr %d in_use %d, block session(%d) app(%d)\n",
+			resp->data, list_ptr->listener_in_use,
+			session_id, data->client.app_id);
+
+		/* sleep until listener is available */
+		sigfillset(&new_sigset);
+		sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
+
+		do {
+			qseecom.app_block_ref_cnt++;
+			ptr_app->app_blocked = true;
+			mutex_unlock(&listener_access_lock);
+			mutex_unlock(&app_access_lock);
+			wait_event_freezable(
+				list_ptr->listener_block_app_wq,
+				!list_ptr->listener_in_use);
+			mutex_lock(&app_access_lock);
+			mutex_lock(&listener_access_lock);
+			ptr_app->app_blocked = false;
+			qseecom.app_block_ref_cnt--;
+		}  while (list_ptr->listener_in_use);
+
+		sigprocmask(SIG_SETMASK, &old_sigset, NULL);
+
+		ptr_app->blocked_on_listener_id = 0;
+		pr_warn("Lsntr %d is available, unblock session(%d) app(%d)\n",
+			resp->data, session_id, data->client.app_id);
+
+		/* notify TZ that listener is available */
+		ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
+
+		if (qseecom.smcinvoke_support)
+			ireq.app_or_session_id = session_id;
+		else
+			ireq.app_or_session_id = data->client.app_id;
+
+		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+					&ireq, sizeof(ireq),
+					&continue_resp, sizeof(continue_resp));
+		if (ret && qseecom.smcinvoke_support) {
+			/* retry with legacy cmd */
+			qseecom.smcinvoke_support = false;
+			ireq.app_or_session_id = data->client.app_id;
+			ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+				&ireq, sizeof(ireq),
+				&continue_resp, sizeof(continue_resp));
+			qseecom.smcinvoke_support = true;
+			if (ret) {
+				pr_err("unblock app %d or session %d fail\n",
+					data->client.app_id, session_id);
+				mutex_unlock(&listener_access_lock);
+				goto exit;
+			}
+		}
+		mutex_unlock(&listener_access_lock);
+		resp->result = continue_resp.result;
+		resp->resp_type = continue_resp.resp_type;
+		resp->data = continue_resp.data;
+		pr_debug("unblock resp = %d\n", resp->result);
+	} while (resp->result == QSEOS_RESULT_BLOCKED_ON_LISTENER);
+
+	if (resp->result != QSEOS_RESULT_INCOMPLETE) {
+		pr_err("Unexpected unblock resp %d\n", resp->result);
+		ret = -EINVAL;
+	}
+exit:
+	return ret;
+}
+
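+/*
+ * Reentrant variant of __qseecom_process_incomplete_cmd(): app_access_lock is
+ * dropped while waiting for the listener response so other TAs can make
+ * progress, and a further BLOCKED_ON_LISTENER result from TZ is handled
+ * in-line.
+ */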
+static int __qseecom_reentrancy_process_incomplete_cmd(
+					struct qseecom_dev_handle *data,
+					struct qseecom_command_scm_resp *resp)
+{
+	int ret = 0;
+	int rc = 0;
+	uint32_t lstnr;
+	struct qseecom_client_listener_data_irsp send_data_rsp = {0};
+	struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit
+									= {0};
+	struct qseecom_registered_listener_list *ptr_svc = NULL;
+	sigset_t new_sigset;
+	sigset_t old_sigset;
+	uint32_t status;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+	struct sglist_info *table = NULL;
+
+	while (ret == 0 && resp->result == QSEOS_RESULT_INCOMPLETE) {
+		lstnr = resp->data;
+		/*
+		 * Wake up the blocking listener service with the lstnr id
+		 */
+		mutex_lock(&listener_access_lock);
+		list_for_each_entry(ptr_svc,
+				&qseecom.registered_listener_list_head, list) {
+			if (ptr_svc->svc.listener_id == lstnr) {
+				ptr_svc->listener_in_use = true;
+				ptr_svc->rcv_req_flag = 1;
+				wake_up_interruptible(&ptr_svc->rcv_req_wq);
+				break;
+			}
+		}
+
+		if (ptr_svc == NULL) {
+			pr_err("Listener Svc %d does not exist\n", lstnr);
+			rc = -EINVAL;
+			status = QSEOS_RESULT_FAILURE;
+			goto err_resp;
+		}
+
+		if (!ptr_svc->dmabuf) {
+			pr_err("Client dmabuf is not initialized\n");
+			rc = -EINVAL;
+			status = QSEOS_RESULT_FAILURE;
+			goto err_resp;
+		}
+
+		if (ptr_svc->svc.listener_id != lstnr) {
+			pr_err("Service %d does not exist\n",
+						lstnr);
+			rc = -ERESTARTSYS;
+			ptr_svc = NULL;
+			status = QSEOS_RESULT_FAILURE;
+			goto err_resp;
+		}
+
+		if (ptr_svc->abort == 1) {
+			pr_debug("Service %d abort %d\n",
+						lstnr, ptr_svc->abort);
+			rc = -ENODEV;
+			status = QSEOS_RESULT_FAILURE;
+			goto err_resp;
+		}
+
+		pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
+
+		/* initialize the new signal mask with all signals*/
+		sigfillset(&new_sigset);
+
+		/* block all signals */
+		sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
+
+		/* unlock mutex between waking listener and sleep-wait */
+		mutex_unlock(&listener_access_lock);
+		mutex_unlock(&app_access_lock);
+		do {
+			if (!wait_event_freezable(qseecom.send_resp_wq,
+				__qseecom_reentrancy_listener_has_sent_rsp(
+						data, ptr_svc))) {
+				break;
+			}
+		} while (1);
+		/* lock mutex again after resp sent */
+		mutex_lock(&app_access_lock);
+		mutex_lock(&listener_access_lock);
+		ptr_svc->send_resp_flag = 0;
+		qseecom.send_resp_flag = 0;
+
+		/* restore signal mask */
+		sigprocmask(SIG_SETMASK, &old_sigset, NULL);
+		if (data->abort || ptr_svc->abort) {
+			pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d\n",
+				data->client.app_id, lstnr, ret);
+			rc = -ENODEV;
+			status  = QSEOS_RESULT_FAILURE;
+		} else {
+			status  = QSEOS_RESULT_SUCCESS;
+		}
+err_resp:
+		if (ptr_svc)
+			table = ptr_svc->sglistinfo_ptr;
+		if (qseecom.qsee_version < QSEE_VERSION_40) {
+			send_data_rsp.listener_id  = lstnr;
+			send_data_rsp.status = status;
+			if (table) {
+				send_data_rsp.sglistinfo_ptr =
+					(uint32_t)virt_to_phys(table);
+				send_data_rsp.sglistinfo_len =
+						SGLISTINFO_TABLE_SIZE;
+				dmac_flush_range((void *)table,
+					(void *)table + SGLISTINFO_TABLE_SIZE);
+			}
+			cmd_buf = (void *)&send_data_rsp;
+			cmd_len = sizeof(send_data_rsp);
+		} else {
+			send_data_rsp_64bit.listener_id  = lstnr;
+			send_data_rsp_64bit.status = status;
+			if (table) {
+				send_data_rsp_64bit.sglistinfo_ptr =
+					virt_to_phys(table);
+				send_data_rsp_64bit.sglistinfo_len =
+					SGLISTINFO_TABLE_SIZE;
+				dmac_flush_range((void *)table,
+					(void *)table + SGLISTINFO_TABLE_SIZE);
+			}
+			cmd_buf = (void *)&send_data_rsp_64bit;
+			cmd_len = sizeof(send_data_rsp_64bit);
+		}
+		if (!qseecom.whitelist_support || table == NULL)
+			*(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
+		else
+			*(uint32_t *)cmd_buf =
+				QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
+
+		if (lstnr == RPMB_SERVICE) {
+			ret = __qseecom_enable_clk(CLK_QSEE);
+			if (ret)
+				goto exit;
+		}
+
+		if (ptr_svc) {
+			ret = qseecom_dmabuf_cache_operations(ptr_svc->dmabuf,
+						QSEECOM_CACHE_CLEAN);
+			if (ret)
+				goto exit;
+
+			ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+					cmd_buf, cmd_len, resp, sizeof(*resp));
+			ptr_svc->listener_in_use = false;
+			__qseecom_clean_listener_sglistinfo(ptr_svc);
+			wake_up_interruptible(&ptr_svc->listener_block_app_wq);
+
+			if (ret) {
+				pr_err("scm_call() failed with err: %d (app_id = %d)\n",
+					ret, data->client.app_id);
+				goto exit;
+			}
+			ret = qseecom_dmabuf_cache_operations(ptr_svc->dmabuf,
+						QSEECOM_CACHE_INVALIDATE);
+			if (ret)
+				goto exit;
+		} else {
+			ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+					cmd_buf, cmd_len, resp, sizeof(*resp));
+			if (ret) {
+				pr_err("scm_call() failed with err: %d (app_id = %d)\n",
+					ret, data->client.app_id);
+				goto exit;
+			}
+		}
+
+		switch (resp->result) {
+		case QSEOS_RESULT_BLOCKED_ON_LISTENER:
+			pr_warn("send lsr %d rsp, but app %d block on lsr %d\n",
+					lstnr, data->client.app_id, resp->data);
+			if (lstnr == resp->data) {
+				pr_err("lstnr %d should not be blocked!\n",
+					lstnr);
+				ret = -EINVAL;
+				goto exit;
+			}
+			ret = __qseecom_process_reentrancy_blocked_on_listener(
+					resp, NULL, data);
+			if (ret) {
+				pr_err("failed to process App(%d) %s blocked on listener %d\n",
+					data->client.app_id,
+					data->client.app_name, resp->data);
+				goto exit;
+			}
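+			/* fall through */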
+		case QSEOS_RESULT_SUCCESS:
+		case QSEOS_RESULT_INCOMPLETE:
+			break;
+		default:
+			pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
+				resp->result, data->client.app_id, lstnr);
+			ret = -EINVAL;
+			goto exit;
+		}
+exit:
+		mutex_unlock(&listener_access_lock);
+		if (lstnr == RPMB_SERVICE)
+			__qseecom_disable_clk(CLK_QSEE);
+
+	}
+	if (rc)
+		return rc;
+
+	return ret;
+}
+
+/*
+ * QSEE doesn't support OS level cmds reentrancy until RE phase-3,
+ * and QSEE OS level scm_call cmds will fail if there is any blocked TZ app.
+ * So, before sending an OS level scm call, first check that no app is
+ * blocked; if any is, wait until all apps are unblocked.
+ */
+static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id)
+{
+	sigset_t new_sigset, old_sigset;
+
+	if (qseecom.qsee_reentrancy_support > QSEE_REENTRANCY_PHASE_0 &&
+		qseecom.qsee_reentrancy_support < QSEE_REENTRANCY_PHASE_3 &&
+		IS_OWNER_TRUSTED_OS(TZ_SYSCALL_OWNER_ID(smc_id))) {
+		/* sleep until all apps are unblocked */
+		while (qseecom.app_block_ref_cnt > 0) {
+			sigfillset(&new_sigset);
+			sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
+			mutex_unlock(&app_access_lock);
+			do {
+				if (!wait_event_freezable(qseecom.app_block_wq,
+					(qseecom.app_block_ref_cnt == 0)))
+					break;
+			} while (1);
+			mutex_lock(&app_access_lock);
+			sigprocmask(SIG_SETMASK, &old_sigset, NULL);
+		}
+	}
+}
+
+/*
+ * scm_call of send data will fail if this TA is blocked or there is more
+ * than one TA requesting listener services; so, first check whether this
+ * app needs to wait.
+ */
+static void __qseecom_reentrancy_check_if_this_app_blocked(
+			struct qseecom_registered_app_list *ptr_app)
+{
+	sigset_t new_sigset, old_sigset;
+
+	if (qseecom.qsee_reentrancy_support) {
+		ptr_app->check_block++;
+		while (ptr_app->app_blocked || qseecom.app_block_ref_cnt > 1) {
+			/* thread sleep until this app unblocked */
+			sigfillset(&new_sigset);
+			sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
+			mutex_unlock(&app_access_lock);
+			do {
+				if (!wait_event_freezable(qseecom.app_block_wq,
+					(!ptr_app->app_blocked &&
+					qseecom.app_block_ref_cnt <= 1)))
+					break;
+			} while (1);
+			mutex_lock(&app_access_lock);
+			sigprocmask(SIG_SETMASK, &old_sigset, NULL);
+		}
+		ptr_app->check_block--;
+	}
+}
+
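+/*
+ * Resolve an app name to an app_id: first check the locally registered app
+ * list, and only if it is not found there ask QSEE with an app lookup scm
+ * call. *app_id stays 0 when the app is not loaded.
+ */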
+static int __qseecom_check_app_exists(struct qseecom_check_app_ireq req,
+					uint32_t *app_id)
+{
+	int32_t ret;
+	struct qseecom_command_scm_resp resp;
+	bool found_app = false;
+	struct qseecom_registered_app_list *entry = NULL;
+	unsigned long flags = 0;
+
+	if (!app_id) {
+		pr_err("Null pointer to app_id\n");
+		return -EINVAL;
+	}
+	*app_id = 0;
+
+	/* check if app exists and has been registered locally */
+	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+	list_for_each_entry(entry,
+			&qseecom.registered_app_list_head, list) {
+		if (!strcmp(entry->app_name, req.app_name)) {
+			found_app = true;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
+	if (found_app) {
+		pr_debug("Found app with id %d\n", entry->app_id);
+		*app_id = entry->app_id;
+		return 0;
+	}
+
+	memset((void *)&resp, 0, sizeof(resp));
+
+	/*  SCM_CALL  to check if app_id for the mentioned app exists */
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
+				sizeof(struct qseecom_check_app_ireq),
+				&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call to check if app is already loaded failed\n");
+		return -EINVAL;
+	}
+
+	if (resp.result == QSEOS_RESULT_FAILURE)
+		return 0;
+
+	switch (resp.resp_type) {
+	/*qsee returned listener type response */
+	case QSEOS_LISTENER_ID:
+		pr_err("resp type is of listener type instead of app\n");
+		return -EINVAL;
+	case QSEOS_APP_ID:
+		*app_id = resp.data;
+		return 0;
+	default:
+		pr_err("invalid resp type (%d) from qsee\n",
+				resp.resp_type);
+		return -ENODEV;
+	}
+}
+
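+/*
+ * Handle the load-app ioctl: make sure the matching cmnlib is loaded, vote
+ * for bus/clocks, and either bump the ref count of an already loaded app or
+ * map the app image and send QSEOS_APP_START_COMMAND to QSEE to load it.
+ */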
+static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp)
+{
+	struct qseecom_registered_app_list *entry = NULL;
+	unsigned long flags = 0;
+	u32 app_id = 0;
+	struct qseecom_load_img_req load_img_req;
+	int32_t ret = 0;
+	phys_addr_t pa = 0;
+	void *vaddr = NULL;
+	struct dma_buf_attachment *attach = NULL;
+	struct dma_buf *dmabuf = NULL;
+	struct sg_table *sgt = NULL;
+
+	size_t len;
+	struct qseecom_command_scm_resp resp;
+	struct qseecom_check_app_ireq req;
+	struct qseecom_load_app_ireq load_req;
+	struct qseecom_load_app_64bit_ireq load_req_64bit;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+	bool first_time = false;
+
+	/* Copy the relevant information needed for loading the image */
+	if (copy_from_user(&load_img_req,
+				(void __user *)argp,
+				sizeof(struct qseecom_load_img_req))) {
+		pr_err("copy_from_user failed\n");
+		return -EFAULT;
+	}
+
+	/* Check and load cmnlib */
+	if (qseecom.qsee_version > QSEEE_VERSION_00) {
+		if (!qseecom.commonlib_loaded &&
+				load_img_req.app_arch == ELFCLASS32) {
+			ret = qseecom_load_commonlib_image(data, "cmnlib");
+			if (ret) {
+				pr_err("failed to load cmnlib\n");
+				return -EIO;
+			}
+			qseecom.commonlib_loaded = true;
+			pr_debug("cmnlib is loaded\n");
+		}
+
+		if (!qseecom.commonlib64_loaded &&
+				load_img_req.app_arch == ELFCLASS64) {
+			ret = qseecom_load_commonlib_image(data, "cmnlib64");
+			if (ret) {
+				pr_err("failed to load cmnlib64\n");
+				return -EIO;
+			}
+			qseecom.commonlib64_loaded = true;
+			pr_debug("cmnlib64 is loaded\n");
+		}
+	}
+
+	if (qseecom.support_bus_scaling) {
+		mutex_lock(&qsee_bw_mutex);
+		ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
+		mutex_unlock(&qsee_bw_mutex);
+		if (ret)
+			return ret;
+	}
+
+	/* Vote for the SFPB clock */
+	ret = __qseecom_enable_clk_scale_up(data);
+	if (ret)
+		goto enable_clk_err;
+
+	req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
+	load_img_req.img_name[MAX_APP_NAME_SIZE-1] = '\0';
+	strlcpy(req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE);
+
+	ret = __qseecom_check_app_exists(req, &app_id);
+	if (ret < 0)
+		goto loadapp_err;
+
+	if (app_id) {
+		pr_debug("App id %d (%s) already exists\n", app_id,
+			(char *)(req.app_name));
+		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+		list_for_each_entry(entry,
+		&qseecom.registered_app_list_head, list){
+			if (entry->app_id == app_id) {
+				if (entry->ref_cnt == U32_MAX) {
+					pr_err("App %d (%s) ref_cnt overflow\n",
+						app_id, req.app_name);
+					ret = -EINVAL;
+					goto loadapp_err;
+				}
+				entry->ref_cnt++;
+				break;
+			}
+		}
+		spin_unlock_irqrestore(
+			&qseecom.registered_app_list_lock, flags);
+		ret = 0;
+	} else {
+		first_time = true;
+		pr_warn("App (%s) doesn't exist, loading app for the first time\n",
+			(char *)(load_img_req.img_name));
+
+		ret = qseecom_vaddr_map(load_img_req.ifd_data_fd,
+				&pa, &vaddr, &sgt, &attach, &len, &dmabuf);
+		if (ret) {
+			pr_err("Ion client could not retrieve the handle\n");
+			ret = -ENOMEM;
+			goto loadapp_err;
+		}
+
+		if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
+			pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
+					len, load_img_req.mdt_len,
+					load_img_req.img_len);
+			ret = -EINVAL;
+			goto loadapp_err;
+		}
+		/* Populate the structure for sending scm call to load image */
+		if (qseecom.qsee_version < QSEE_VERSION_40) {
+			load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
+			load_req.mdt_len = load_img_req.mdt_len;
+			load_req.img_len = load_img_req.img_len;
+			strlcpy(load_req.app_name, load_img_req.img_name,
+						MAX_APP_NAME_SIZE);
+			load_req.phy_addr = (uint32_t)pa;
+			cmd_buf = (void *)&load_req;
+			cmd_len = sizeof(struct qseecom_load_app_ireq);
+		} else {
+			load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
+			load_req_64bit.mdt_len = load_img_req.mdt_len;
+			load_req_64bit.img_len = load_img_req.img_len;
+			strlcpy(load_req_64bit.app_name, load_img_req.img_name,
+						MAX_APP_NAME_SIZE);
+			load_req_64bit.phy_addr = (uint64_t)pa;
+			cmd_buf = (void *)&load_req_64bit;
+			cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
+		}
+
+		ret = qseecom_dmabuf_cache_operations(dmabuf,
+						QSEECOM_CACHE_CLEAN);
+		if (ret) {
+			pr_err("cache operation failed %d\n", ret);
+			goto loadapp_err;
+		}
+
+		/*  SCM_CALL  to load the app and get the app_id back */
+		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf,
+			cmd_len, &resp, sizeof(resp));
+		if (ret) {
+			pr_err("scm_call to load app failed\n");
+			ret = -EINVAL;
+			goto loadapp_err;
+		}
+		ret = qseecom_dmabuf_cache_operations(dmabuf,
+						QSEECOM_CACHE_INVALIDATE);
+		if (ret) {
+			pr_err("cache operation failed %d\n", ret);
+			goto loadapp_err;
+		}
+
+		if (resp.result == QSEOS_RESULT_FAILURE) {
+			pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
+			ret = -EFAULT;
+			goto loadapp_err;
+		}
+
+		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
+			ret = __qseecom_process_incomplete_cmd(data, &resp);
+			if (ret) {
+				pr_err("process_incomplete_cmd failed err: %d\n",
+					ret);
+				ret = -EFAULT;
+				goto loadapp_err;
+			}
+		}
+
+		if (resp.result != QSEOS_RESULT_SUCCESS) {
+			pr_err("scm_call failed resp.result unknown, %d\n",
+				resp.result);
+			ret = -EFAULT;
+			goto loadapp_err;
+		}
+
+		app_id = resp.data;
+
+		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+		if (!entry) {
+			ret = -ENOMEM;
+			goto loadapp_err;
+		}
+		entry->app_id = app_id;
+		entry->ref_cnt = 1;
+		entry->app_arch = load_img_req.app_arch;
+		/*
+		 * keymaster app may be first loaded as "keymaste" by qseecomd,
+		 * and then used as "keymaster" on some targets. To avoid app
+		 * name checking error, register "keymaster" into app_list and
+		 * thread private data.
+		 */
+		if (!strcmp(load_img_req.img_name, "keymaste"))
+			strlcpy(entry->app_name, "keymaster",
+					MAX_APP_NAME_SIZE);
+		else
+			strlcpy(entry->app_name, load_img_req.img_name,
+					MAX_APP_NAME_SIZE);
+		entry->app_blocked = false;
+		entry->blocked_on_listener_id = 0;
+		entry->check_block = 0;
+
+		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+		list_add_tail(&entry->list, &qseecom.registered_app_list_head);
+		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
+									flags);
+
+		pr_warn("App with id %u (%s) now loaded\n", app_id,
+		(char *)(load_img_req.img_name));
+	}
+	data->client.app_id = app_id;
+	data->client.app_arch = load_img_req.app_arch;
+	if (!strcmp(load_img_req.img_name, "keymaste"))
+		strlcpy(data->client.app_name, "keymaster", MAX_APP_NAME_SIZE);
+	else
+		strlcpy(data->client.app_name, load_img_req.img_name,
+					MAX_APP_NAME_SIZE);
+	load_img_req.app_id = app_id;
+	if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) {
+		pr_err("copy_to_user failed\n");
+		ret = -EFAULT;
+		if (first_time) {
+			spin_lock_irqsave(
+				&qseecom.registered_app_list_lock, flags);
+			list_del(&entry->list);
+			spin_unlock_irqrestore(
+				&qseecom.registered_app_list_lock, flags);
+			kzfree(entry);
+		}
+	}
+
+loadapp_err:
+	__qseecom_disable_clk_scale_down(data);
+	if (dmabuf)
+		qseecom_vaddr_unmap(vaddr, sgt, attach, dmabuf);
+enable_clk_err:
+	if (qseecom.support_bus_scaling) {
+		mutex_lock(&qsee_bw_mutex);
+		qseecom_unregister_bus_bandwidth_needs(data);
+		mutex_unlock(&qsee_bw_mutex);
+	}
+	return ret;
+}
+
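+/*
+ * Wake any pending listener-response waiters and wait for all other
+ * outstanding ioctls on this handle to drain (ioctl_count <= 1) before
+ * the app is unloaded. Returns 1 to request unload, or -ERESTARTSYS if
+ * the wait is interrupted.
+ */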
+static int __qseecom_cleanup_app(struct qseecom_dev_handle *data)
+{
+	int ret = 1;	/* Set unload app */
+
+	wake_up_all(&qseecom.send_resp_wq);
+	if (qseecom.qsee_reentrancy_support)
+		mutex_unlock(&app_access_lock);
+	while (atomic_read(&data->ioctl_count) > 1) {
+		if (wait_event_freezable(data->abort_wq,
+					atomic_read(&data->ioctl_count) <= 1)) {
+			pr_err("Interrupted from abort\n");
+			ret = -ERESTARTSYS;
+			break;
+		}
+	}
+	if (qseecom.qsee_reentrancy_support)
+		mutex_lock(&app_access_lock);
+	return ret;
+}
+
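+/*
+ * Unload the TA associated with this handle. The app is only shut down
+ * in TZ when it crashed or this is the last reference; otherwise the
+ * reference count is simply dropped. The keymaster app is never
+ * unloaded from TZ.
+ */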
+static int qseecom_unload_app(struct qseecom_dev_handle *data,
+				bool app_crash)
+{
+	unsigned long flags;
+	unsigned long flags1;
+	int ret = 0;
+	struct qseecom_command_scm_resp resp;
+	struct qseecom_registered_app_list *ptr_app = NULL;
+	bool unload = false;
+	bool found_app = false;
+	bool found_dead_app = false;
+
+	if (!data) {
+		pr_err("Invalid/uninitialized device handle\n");
+		return -EINVAL;
+	}
+
+	if (!memcmp(data->client.app_name, "keymaste", strlen("keymaste"))) {
+		pr_debug("Do not unload keymaster app from tz\n");
+		goto unload_exit;
+	}
+
+	__qseecom_cleanup_app(data);
+	__qseecom_reentrancy_check_if_no_app_blocked(TZ_OS_APP_SHUTDOWN_ID);
+
+	if (data->client.app_id > 0) {
+		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+		list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
+									list) {
+			if (ptr_app->app_id == data->client.app_id) {
+				if (!strcmp((void *)ptr_app->app_name,
+					(void *)data->client.app_name)) {
+					found_app = true;
+					if (ptr_app->app_blocked ||
+							ptr_app->check_block)
+						app_crash = false;
+					if (app_crash || ptr_app->ref_cnt == 1)
+						unload = true;
+					break;
+				}
+				found_dead_app = true;
+				break;
+			}
+		}
+		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
+								flags);
+		if (!found_app && !found_dead_app) {
+			pr_err("Cannot find app with id = %d (%s)\n",
+				data->client.app_id,
+				(char *)data->client.app_name);
+			ret = -EINVAL;
+			goto unload_exit;
+		}
+	}
+
+	if (found_dead_app)
+		pr_warn("cleanup app_id %d(%s)\n", data->client.app_id,
+			(char *)data->client.app_name);
+
+	if (unload) {
+		struct qseecom_unload_app_ireq req;
+		/* Populate the structure for sending scm call to unload the app */
+		req.qsee_cmd_id = QSEOS_APP_SHUTDOWN_COMMAND;
+		req.app_id = data->client.app_id;
+
+		/* SCM_CALL to unload the app */
+		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
+				sizeof(struct qseecom_unload_app_ireq),
+				&resp, sizeof(resp));
+		if (ret) {
+			pr_err("scm_call to unload app (id = %d) failed\n",
+								req.app_id);
+			ret = -EFAULT;
+			goto unload_exit;
+		} else {
+			pr_warn("App id %d now unloaded\n", req.app_id);
+		}
+		if (resp.result == QSEOS_RESULT_FAILURE) {
+			pr_err("app (%d) unload failed\n",
+					data->client.app_id);
+			ret = -EFAULT;
+			goto unload_exit;
+		}
+		if (resp.result == QSEOS_RESULT_SUCCESS)
+			pr_debug("App (%d) is unloaded!!\n",
+					data->client.app_id);
+		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
+			ret = __qseecom_process_incomplete_cmd(data, &resp);
+			if (ret) {
+				pr_err("process_incomplete_cmd failed err: %d\n",
+									ret);
+				goto unload_exit;
+			}
+		}
+	}
+
+unload_exit:
+	if (found_app) {
+		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
+		if (app_crash) {
+			ptr_app->ref_cnt = 0;
+			pr_debug("app_crash: ref_count = 0\n");
+		} else {
+			if (ptr_app->ref_cnt == 1) {
+				ptr_app->ref_cnt = 0;
+				pr_debug("ref_count set to 0\n");
+			} else {
+				ptr_app->ref_cnt--;
+				pr_debug("Can't unload app(%d) inuse\n",
+					ptr_app->app_id);
+			}
+		}
+		if (unload) {
+			list_del(&ptr_app->list);
+			kzfree(ptr_app);
+		}
+		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
+								flags1);
+	}
+
+	if (data->client.dmabuf)
+		qseecom_vaddr_unmap(data->client.sb_virt, data->client.sgt,
+			data->client.attach, data->client.dmabuf);
+	data->released = true;
+	return ret;
+}
+
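+/*
+ * Translate an address inside the client's shared buffer from its
+ * userspace mapping to the corresponding physical (or kernel virtual)
+ * address, using the recorded buffer base addresses.
+ */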
+static phys_addr_t __qseecom_uvirt_to_kphys(struct qseecom_dev_handle *data,
+						unsigned long virt)
+{
+	return data->client.sb_phys + (virt - data->client.user_virt_sb_base);
+}
+
+static uintptr_t __qseecom_uvirt_to_kvirt(struct qseecom_dev_handle *data,
+						unsigned long virt)
+{
+	return (uintptr_t)data->client.sb_virt +
+				(virt - data->client.user_virt_sb_base);
+}
+
+int __qseecom_process_rpmb_svc_cmd(struct qseecom_dev_handle *data_ptr,
+		struct qseecom_send_svc_cmd_req *req_ptr,
+		struct qseecom_client_send_service_ireq *send_svc_ireq_ptr)
+{
+	int ret = 0;
+	void *req_buf = NULL;
+
+	if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
+		pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
+			req_ptr, send_svc_ireq_ptr);
+		return -EINVAL;
+	}
+
+	/* Clients need to ensure req_buf is at base offset of shared buffer */
+	if ((uintptr_t)req_ptr->cmd_req_buf !=
+			data_ptr->client.user_virt_sb_base) {
+		pr_err("cmd buf not pointing to base offset of shared buffer\n");
+		return -EINVAL;
+	}
+
+	if (data_ptr->client.sb_length <
+			sizeof(struct qseecom_rpmb_provision_key)) {
+		pr_err("shared buffer is too small to hold key type\n");
+		return -EINVAL;
+	}
+	req_buf = data_ptr->client.sb_virt;
+
+	send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
+	send_svc_ireq_ptr->key_type =
+		((struct qseecom_rpmb_provision_key *)req_buf)->key_type;
+	send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
+	send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
+			data_ptr, (uintptr_t)req_ptr->resp_buf));
+	send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
+
+	return ret;
+}
+
+int __qseecom_process_fsm_key_svc_cmd(struct qseecom_dev_handle *data_ptr,
+		struct qseecom_send_svc_cmd_req *req_ptr,
+		struct qseecom_client_send_fsm_key_req *send_svc_ireq_ptr)
+{
+	int ret = 0;
+	uint32_t reqd_len_sb_in = 0;
+
+	if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
+		pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
+			req_ptr, send_svc_ireq_ptr);
+		return -EINVAL;
+	}
+
+	reqd_len_sb_in = req_ptr->cmd_req_len + req_ptr->resp_len;
+	if (reqd_len_sb_in > data_ptr->client.sb_length) {
+		pr_err("Not enough memory to fit cmd_buf and resp_buf.\n");
+		pr_err("Required: %u, Available: %zu\n",
+				reqd_len_sb_in, data_ptr->client.sb_length);
+		return -ENOMEM;
+	}
+
+	send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
+	send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
+	send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
+			data_ptr, (uintptr_t)req_ptr->resp_buf));
+	send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
+
+	send_svc_ireq_ptr->req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
+			data_ptr, (uintptr_t)req_ptr->cmd_req_buf));
+
+	return ret;
+}
+
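+/*
+ * Sanity-check a service command request: both buffers must lie fully
+ * within the client's shared buffer, and the combined lengths must not
+ * overflow or exceed the shared buffer size.
+ */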
+static int __validate_send_service_cmd_inputs(struct qseecom_dev_handle *data,
+				struct qseecom_send_svc_cmd_req *req)
+{
+	if (!req || !req->resp_buf || !req->cmd_req_buf) {
+		pr_err("req or cmd buffer or response buffer is null\n");
+		return -EINVAL;
+	}
+
+	if (!data || !data->client.sb_virt) {
+		pr_err("Client or client buf is not initialized\n");
+		return -EINVAL;
+	}
+
+	if (data->client.user_virt_sb_base == 0) {
+		pr_err("user_virt_sb_base is null\n");
+		return -EINVAL;
+	}
+
+	if (data->client.sb_length == 0) {
+		pr_err("sb_length is 0\n");
+		return -EINVAL;
+	}
+
+	if (((uintptr_t)req->cmd_req_buf <
+				data->client.user_virt_sb_base) ||
+		((uintptr_t)req->cmd_req_buf >=
+		(data->client.user_virt_sb_base + data->client.sb_length))) {
+		pr_err("cmd buffer address not within shared buffer\n");
+		return -EINVAL;
+	}
+	if (((uintptr_t)req->resp_buf <
+				data->client.user_virt_sb_base)  ||
+		((uintptr_t)req->resp_buf >=
+		(data->client.user_virt_sb_base + data->client.sb_length))) {
+		pr_err("response buffer address not within shared buffer\n");
+		return -EINVAL;
+	}
+	if ((req->cmd_req_len == 0) || (req->resp_len == 0) ||
+		(req->cmd_req_len > data->client.sb_length) ||
+		(req->resp_len > data->client.sb_length)) {
+		pr_err("cmd buf length or response buf length not valid\n");
+		return -EINVAL;
+	}
+	if (req->cmd_req_len > UINT_MAX - req->resp_len) {
+		pr_err("Integer overflow detected in req_len & rsp_len\n");
+		return -EINVAL;
+	}
+
+	if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
+		pr_debug("Not enough memory to fit cmd_buf and resp_buf. Required: %u, Available: %zu\n",
+				(req->cmd_req_len + req->resp_len),
+					data->client.sb_length);
+		return -ENOMEM;
+	}
+	if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
+		pr_err("Integer overflow in req_len & cmd_req_buf\n");
+		return -EINVAL;
+	}
+	if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
+		pr_err("Integer overflow in resp_len & resp_buf\n");
+		return -EINVAL;
+	}
+	if (data->client.user_virt_sb_base >
+					(ULONG_MAX - data->client.sb_length)) {
+		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
+		return -EINVAL;
+	}
+	if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
+		((uintptr_t)data->client.user_virt_sb_base +
+					data->client.sb_length)) ||
+		(((uintptr_t)req->resp_buf + req->resp_len) >
+		((uintptr_t)data->client.user_virt_sb_base +
+					data->client.sb_length))) {
+		pr_err("cmd buf or resp buf is out of shared buffer region\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
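+/*
+ * Send an RPMB or FSM key service command to TZ on behalf of the
+ * client: validate the request, perform cache maintenance on the
+ * client dma-buf, vote for bus bandwidth/clocks, issue the SCM call,
+ * and handle incomplete responses.
+ */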
+static int qseecom_send_service_cmd(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	int ret = 0;
+	struct qseecom_client_send_service_ireq send_svc_ireq;
+	struct qseecom_client_send_fsm_key_req send_fsm_key_svc_ireq;
+	struct qseecom_command_scm_resp resp;
+	struct qseecom_send_svc_cmd_req req;
+	void   *send_req_ptr;
+	size_t req_buf_size;
+
+	if (copy_from_user(&req,
+				(void __user *)argp,
+				sizeof(req))) {
+		pr_err("copy_from_user failed\n");
+		return -EFAULT;
+	}
+
+	if (__validate_send_service_cmd_inputs(data, &req))
+		return -EINVAL;
+
+	data->type = QSEECOM_SECURE_SERVICE;
+
+	switch (req.cmd_id) {
+	case QSEOS_RPMB_PROVISION_KEY_COMMAND:
+	case QSEOS_RPMB_ERASE_COMMAND:
+	case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND:
+		send_req_ptr = &send_svc_ireq;
+		req_buf_size = sizeof(send_svc_ireq);
+		if (__qseecom_process_rpmb_svc_cmd(data, &req,
+				send_req_ptr))
+			return -EINVAL;
+		break;
+	case QSEOS_FSM_LTEOTA_REQ_CMD:
+	case QSEOS_FSM_LTEOTA_REQ_RSP_CMD:
+	case QSEOS_FSM_IKE_REQ_CMD:
+	case QSEOS_FSM_IKE_REQ_RSP_CMD:
+	case QSEOS_FSM_OEM_FUSE_WRITE_ROW:
+	case QSEOS_FSM_OEM_FUSE_READ_ROW:
+	case QSEOS_FSM_ENCFS_REQ_CMD:
+	case QSEOS_FSM_ENCFS_REQ_RSP_CMD:
+		send_req_ptr = &send_fsm_key_svc_ireq;
+		req_buf_size = sizeof(send_fsm_key_svc_ireq);
+		if (__qseecom_process_fsm_key_svc_cmd(data, &req,
+				send_req_ptr))
+			return -EINVAL;
+		break;
+	default:
+		pr_err("Unsupported cmd_id %d\n", req.cmd_id);
+		return -EINVAL;
+	}
+
+	ret = qseecom_dmabuf_cache_operations(data->client.dmabuf,
+					QSEECOM_CACHE_CLEAN);
+	if (ret) {
+		pr_err("cache operation failed %d\n", ret);
+		return ret;
+	}
+
+	if (qseecom.support_bus_scaling) {
+		ret = qseecom_scale_bus_bandwidth_timer(HIGH);
+		if (ret) {
+			pr_err("Fail to set bw HIGH\n");
+			return ret;
+		}
+	} else {
+		ret = qseecom_perf_enable(data);
+		if (ret) {
+			pr_err("Failed to vote for clocks with err %d\n", ret);
+			return ret;
+		}
+	}
+
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+				(const void *)send_req_ptr,
+				req_buf_size, &resp, sizeof(resp));
+
+	if (ret) {
+		pr_err("qseecom_scm_call failed with err: %d\n", ret);
+		goto exit;
+	}
+
+	ret = qseecom_dmabuf_cache_operations(data->client.dmabuf,
+					QSEECOM_CACHE_INVALIDATE);
+	if (ret) {
+		pr_err("cache operation failed %d\n", ret);
+		goto exit;
+	}
+
+	switch (resp.result) {
+	case QSEOS_RESULT_SUCCESS:
+		break;
+	case QSEOS_RESULT_INCOMPLETE:
+		pr_debug("qseos_result_incomplete\n");
+		ret = __qseecom_process_incomplete_cmd(data, &resp);
+		if (ret) {
+			pr_err("process_incomplete_cmd failed with result: %d\n",
+				resp.result);
+		}
+		if (req.cmd_id == QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND) {
+			pr_warn("RPMB key status is 0x%x\n", resp.result);
+			if (put_user(resp.result,
+				(uint32_t __user *)req.resp_buf)) {
+				ret = -EINVAL;
+				goto exit;
+			}
+			ret = 0;
+		}
+		break;
+	case QSEOS_RESULT_FAILURE:
+		pr_err("scm call failed with resp.result: %d\n", resp.result);
+		ret = -EINVAL;
+		break;
+	default:
+		pr_err("Response result %d not supported\n",
+				resp.result);
+		ret = -EINVAL;
+		break;
+	}
+
+exit:
+	if (!qseecom.support_bus_scaling) {
+		qsee_disable_clock_vote(data, CLK_DFAB);
+		qsee_disable_clock_vote(data, CLK_SFPB);
+	} else {
+		__qseecom_add_bw_scale_down_timer(
+			QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
+	}
+	return ret;
+}
+
+static int __validate_send_cmd_inputs(struct qseecom_dev_handle *data,
+				struct qseecom_send_cmd_req *req)
+{
+	if (!data || !data->client.sb_virt) {
+		pr_err("Client or client buf is not initialized\n");
+		return -EINVAL;
+	}
+	if (((req->resp_buf == NULL) && (req->resp_len != 0)) ||
+						(req->cmd_req_buf == NULL)) {
+		pr_err("cmd buffer or response buffer is null\n");
+		return -EINVAL;
+	}
+	if (((uintptr_t)req->cmd_req_buf <
+				data->client.user_virt_sb_base) ||
+		((uintptr_t)req->cmd_req_buf >=
+		(data->client.user_virt_sb_base + data->client.sb_length))) {
+		pr_err("cmd buffer address not within shared buffer\n");
+		return -EINVAL;
+	}
+	if (((uintptr_t)req->resp_buf <
+				data->client.user_virt_sb_base)  ||
+		((uintptr_t)req->resp_buf >=
+		(data->client.user_virt_sb_base + data->client.sb_length))) {
+		pr_err("response buffer address not within shared buffer\n");
+		return -EINVAL;
+	}
+	if ((req->cmd_req_len == 0) ||
+		(req->cmd_req_len > data->client.sb_length) ||
+		(req->resp_len > data->client.sb_length)) {
+		pr_err("cmd buf length or response buf length not valid\n");
+		return -EINVAL;
+	}
+	if (req->cmd_req_len > UINT_MAX - req->resp_len) {
+		pr_err("Integer overflow detected in req_len & rsp_len\n");
+		return -EINVAL;
+	}
+
+	if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
+		pr_debug("Not enough memory to fit cmd_buf and resp_buf. Required: %u, Available: %zu\n",
+				(req->cmd_req_len + req->resp_len),
+					data->client.sb_length);
+		return -ENOMEM;
+	}
+	if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
+		pr_err("Integer overflow in req_len & cmd_req_buf\n");
+		return -EINVAL;
+	}
+	if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
+		pr_err("Integer overflow in resp_len & resp_buf\n");
+		return -EINVAL;
+	}
+	if (data->client.user_virt_sb_base >
+					(ULONG_MAX - data->client.sb_length)) {
+		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
+		return -EINVAL;
+	}
+	if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
+		((uintptr_t)data->client.user_virt_sb_base +
+						data->client.sb_length)) ||
+		(((uintptr_t)req->resp_buf + req->resp_len) >
+		((uintptr_t)data->client.user_virt_sb_base +
+						data->client.sb_length))) {
+		pr_err("cmd buf or resp buf is out of shared buffer region\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int __qseecom_process_reentrancy(struct qseecom_command_scm_resp *resp,
+				struct qseecom_registered_app_list *ptr_app,
+				struct qseecom_dev_handle *data)
+{
+	int ret = 0;
+
+	switch (resp->result) {
+	case QSEOS_RESULT_BLOCKED_ON_LISTENER:
+		pr_warn("App(%d) %s is blocked on listener %d\n",
+			data->client.app_id, data->client.app_name,
+			resp->data);
+		ret = __qseecom_process_reentrancy_blocked_on_listener(
+					resp, ptr_app, data);
+		if (ret) {
+			pr_err("failed to process App(%d) %s is blocked on listener %d\n",
+			data->client.app_id, data->client.app_name, resp->data);
+			return ret;
+		}
+		/* fall through to process incomplete request */
+	case QSEOS_RESULT_INCOMPLETE:
+		qseecom.app_block_ref_cnt++;
+		ptr_app->app_blocked = true;
+		ret = __qseecom_reentrancy_process_incomplete_cmd(data, resp);
+		ptr_app->app_blocked = false;
+		qseecom.app_block_ref_cnt--;
+		wake_up_interruptible(&qseecom.app_block_wq);
+		if (ret)
+			pr_err("process_incomplete_cmd failed err: %d\n",
+					ret);
+		return ret;
+	case QSEOS_RESULT_SUCCESS:
+		return ret;
+	default:
+		pr_err("Response result %d not supported\n",
+						resp->result);
+		return -EINVAL;
+	}
+}
+
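+/*
+ * Core send-command path: look up the registered app, build the 32-bit
+ * or 64-bit send-data request with physical addresses derived from the
+ * client's shared buffer, flush the sglist info table, and issue the
+ * SCM call, handling reentrancy and incomplete responses.
+ */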
+static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
+				struct qseecom_send_cmd_req *req)
+{
+	int ret = 0;
+	u32 reqd_len_sb_in = 0;
+	struct qseecom_client_send_data_ireq send_data_req = {0};
+	struct qseecom_client_send_data_64bit_ireq send_data_req_64bit = {0};
+	struct qseecom_command_scm_resp resp;
+	unsigned long flags;
+	struct qseecom_registered_app_list *ptr_app;
+	bool found_app = false;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+	struct sglist_info *table = data->sglistinfo_ptr;
+
+	reqd_len_sb_in = req->cmd_req_len + req->resp_len;
+	/* find app_id & img_name from list */
+	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
+							list) {
+		if ((ptr_app->app_id == data->client.app_id) &&
+			 (!strcmp(ptr_app->app_name, data->client.app_name))) {
+			found_app = true;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
+
+	if (!found_app) {
+		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
+			(char *)data->client.app_name);
+		return -ENOENT;
+	}
+
+	if (qseecom.qsee_version < QSEE_VERSION_40) {
+		send_data_req.app_id = data->client.app_id;
+		send_data_req.req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
+					data, (uintptr_t)req->cmd_req_buf));
+		send_data_req.req_len = req->cmd_req_len;
+		send_data_req.rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
+					data, (uintptr_t)req->resp_buf));
+		send_data_req.rsp_len = req->resp_len;
+		send_data_req.sglistinfo_ptr =
+				(uint32_t)virt_to_phys(table);
+		send_data_req.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
+		dmac_flush_range((void *)table,
+				(void *)table + SGLISTINFO_TABLE_SIZE);
+		cmd_buf = (void *)&send_data_req;
+		cmd_len = sizeof(struct qseecom_client_send_data_ireq);
+	} else {
+		send_data_req_64bit.app_id = data->client.app_id;
+		send_data_req_64bit.req_ptr = __qseecom_uvirt_to_kphys(data,
+					(uintptr_t)req->cmd_req_buf);
+		send_data_req_64bit.req_len = req->cmd_req_len;
+		send_data_req_64bit.rsp_ptr = __qseecom_uvirt_to_kphys(data,
+					(uintptr_t)req->resp_buf);
+		send_data_req_64bit.rsp_len = req->resp_len;
+		/* check if 32bit app's phys_addr region is under 4GB.*/
+		if ((data->client.app_arch == ELFCLASS32) &&
+			((send_data_req_64bit.req_ptr >=
+				PHY_ADDR_4G - send_data_req_64bit.req_len) ||
+			(send_data_req_64bit.rsp_ptr >=
+				PHY_ADDR_4G - send_data_req_64bit.rsp_len))){
+			pr_err("32bit app %s PA exceeds 4G: req_ptr=%llx, req_len=%x, rsp_ptr=%llx, rsp_len=%x\n",
+				data->client.app_name,
+				send_data_req_64bit.req_ptr,
+				send_data_req_64bit.req_len,
+				send_data_req_64bit.rsp_ptr,
+				send_data_req_64bit.rsp_len);
+			return -EFAULT;
+		}
+		send_data_req_64bit.sglistinfo_ptr =
+				(uint64_t)virt_to_phys(table);
+		send_data_req_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
+		dmac_flush_range((void *)table,
+				(void *)table + SGLISTINFO_TABLE_SIZE);
+		cmd_buf = (void *)&send_data_req_64bit;
+		cmd_len = sizeof(struct qseecom_client_send_data_64bit_ireq);
+	}
+
+	if (!qseecom.whitelist_support || data->use_legacy_cmd)
+		*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND;
+	else
+		*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST;
+
+	if (data->client.dmabuf) {
+		ret = qseecom_dmabuf_cache_operations(data->client.dmabuf,
+					QSEECOM_CACHE_CLEAN);
+		if (ret) {
+			pr_err("cache operation failed %d\n", ret);
+			return ret;
+		}
+	}
+
+	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
+
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+				cmd_buf, cmd_len,
+				&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
+					ret, data->client.app_id);
+		goto exit;
+	}
+	if (data->client.dmabuf) {
+		ret = qseecom_dmabuf_cache_operations(data->client.dmabuf,
+					QSEECOM_CACHE_INVALIDATE);
+		if (ret) {
+			pr_err("cache operation failed %d\n", ret);
+			goto exit;
+		}
+	}
+
+	if (qseecom.qsee_reentrancy_support) {
+		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
+		if (ret)
+			goto exit;
+	} else {
+		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
+			ret = __qseecom_process_incomplete_cmd(data, &resp);
+			if (ret) {
+				pr_err("process_incomplete_cmd failed err: %d\n",
+						ret);
+				goto exit;
+			}
+		} else {
+			if (resp.result != QSEOS_RESULT_SUCCESS) {
+				pr_err("Response result %d not supported\n",
+								resp.result);
+				ret = -EINVAL;
+				goto exit;
+			}
+		}
+	}
+exit:
+	__qseecom_processing_pending_lsnr_unregister();
+	return ret;
+}
+
+static int qseecom_send_cmd(struct qseecom_dev_handle *data, void __user *argp)
+{
+	int ret = 0;
+	struct qseecom_send_cmd_req req;
+
+	ret = copy_from_user(&req, argp, sizeof(req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+
+	if (__validate_send_cmd_inputs(data, &req))
+		return -EINVAL;
+
+	ret = __qseecom_send_cmd(data, &req);
+
+	return ret;
+}
+
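+/*
+ * Verify that the caller-supplied cmd_buf_offset leaves room for at
+ * least one 32-bit physical address within the command buffer (client
+ * apps) or the listener response buffer (listener services).
+ */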
+int __boundary_checks_offset(struct qseecom_send_modfd_cmd_req *req,
+			struct qseecom_send_modfd_listener_resp *lstnr_resp,
+			struct qseecom_dev_handle *data, int i)
+{
+	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
+						(req->ifd_data[i].fd > 0)) {
+		if ((req->cmd_req_len < sizeof(uint32_t)) ||
+			(req->ifd_data[i].cmd_buf_offset >
+			req->cmd_req_len - sizeof(uint32_t))) {
+			pr_err("Invalid offset (req len) 0x%x\n",
+				req->ifd_data[i].cmd_buf_offset);
+			return -EINVAL;
+		}
+	} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
+					(lstnr_resp->ifd_data[i].fd > 0)) {
+		if ((lstnr_resp->resp_len < sizeof(uint32_t)) ||
+			(lstnr_resp->ifd_data[i].cmd_buf_offset >
+			lstnr_resp->resp_len - sizeof(uint32_t))) {
+			pr_err("Invalid offset (lstnr resp len) 0x%x\n",
+				lstnr_resp->ifd_data[i].cmd_buf_offset);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
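+/*
+ * Walk the ifd_data array of a modfd request, map each dma-buf fd and
+ * patch its physical address (or a scatter-gather entry list) into the
+ * command buffer at cmd_buf_offset, recording sglist info for the TZ
+ * whitelist. With cleanup set, the addresses are zeroed again and the
+ * buffers invalidated after the SCM call.
+ */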
+static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
+			struct qseecom_dev_handle *data)
+{
+	char *field;
+	int ret = 0;
+	int i = 0;
+	uint32_t len = 0;
+	struct scatterlist *sg;
+	struct qseecom_send_modfd_cmd_req *req = NULL;
+	struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
+	struct qseecom_registered_listener_list *this_lstnr = NULL;
+	uint32_t offset;
+	struct sg_table *sg_ptr = NULL;
+	int ion_fd = -1;
+	struct dma_buf *dmabuf = NULL;
+	struct dma_buf_attachment *attach = NULL;
+
+	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
+			(data->type != QSEECOM_CLIENT_APP))
+		return -EFAULT;
+
+	if (msg == NULL) {
+		pr_err("Invalid address\n");
+		return -EINVAL;
+	}
+	if (data->type == QSEECOM_LISTENER_SERVICE) {
+		lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
+		this_lstnr = __qseecom_find_svc(data->listener.id);
+		if (IS_ERR_OR_NULL(this_lstnr)) {
+			pr_err("Invalid listener ID\n");
+			return -ENOMEM;
+		}
+	} else {
+		req = (struct qseecom_send_modfd_cmd_req *)msg;
+	}
+
+	for (i = 0; i < MAX_ION_FD; i++) {
+		if ((data->type != QSEECOM_LISTENER_SERVICE) &&
+						(req->ifd_data[i].fd > 0)) {
+			ion_fd = req->ifd_data[i].fd;
+			field = (char *) req->cmd_req_buf +
+				req->ifd_data[i].cmd_buf_offset;
+		} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
+				(lstnr_resp->ifd_data[i].fd > 0)) {
+			ion_fd = lstnr_resp->ifd_data[i].fd;
+			field = lstnr_resp->resp_buf_ptr +
+				lstnr_resp->ifd_data[i].cmd_buf_offset;
+		} else {
+			continue;
+		}
+		/* Populate the cmd data structure with the phys_addr */
+		ret = qseecom_dmabuf_map(ion_fd, &sg_ptr, &attach, &dmabuf);
+		if (ret) {
+			pr_err("Ion client could not retrieve sg table\n");
+			goto err;
+		}
+		if (sg_ptr->nents == 0) {
+			pr_err("Num of scattered entries is 0\n");
+			goto err;
+		}
+		if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
+			pr_err("Num of scattered entries (%d) is greater than max supported %d\n",
+				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
+			goto err;
+		}
+		sg = sg_ptr->sgl;
+		if (sg_ptr->nents == 1) {
+			uint32_t *update;
+
+			if (__boundary_checks_offset(req, lstnr_resp, data, i))
+				goto err;
+			if ((data->type == QSEECOM_CLIENT_APP &&
+				(data->client.app_arch == ELFCLASS32 ||
+				data->client.app_arch == ELFCLASS64)) ||
+				(data->type == QSEECOM_LISTENER_SERVICE)) {
+				/*
+				 * Check if sg list phy add region is under 4GB
+				 */
+				if ((qseecom.qsee_version >= QSEE_VERSION_40) &&
+					(!cleanup) &&
+					((uint64_t)sg_dma_address(sg_ptr->sgl)
+					>= PHY_ADDR_4G - sg->length)) {
+					pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
+						data->client.app_name,
+						&(sg_dma_address(sg_ptr->sgl)),
+						sg->length);
+					goto err;
+				}
+				update = (uint32_t *) field;
+				*update = cleanup ? 0 :
+					(uint32_t)sg_dma_address(sg_ptr->sgl);
+			} else {
+				pr_err("QSEE app arch %u is not supported\n",
+							data->client.app_arch);
+				goto err;
+			}
+			len += (uint32_t)sg->length;
+		} else {
+			struct qseecom_sg_entry *update;
+			int j = 0;
+
+			if ((data->type != QSEECOM_LISTENER_SERVICE) &&
+					(req->ifd_data[i].fd > 0)) {
+
+				if ((req->cmd_req_len <
+					 SG_ENTRY_SZ * sg_ptr->nents) ||
+					(req->ifd_data[i].cmd_buf_offset >
+						(req->cmd_req_len -
+						SG_ENTRY_SZ * sg_ptr->nents))) {
+					pr_err("Invalid offset = 0x%x\n",
+					req->ifd_data[i].cmd_buf_offset);
+					goto err;
+				}
+
+			} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
+					(lstnr_resp->ifd_data[i].fd > 0)) {
+
+				if ((lstnr_resp->resp_len <
+						SG_ENTRY_SZ * sg_ptr->nents) ||
+				(lstnr_resp->ifd_data[i].cmd_buf_offset >
+						(lstnr_resp->resp_len -
+						SG_ENTRY_SZ * sg_ptr->nents))) {
+					goto err;
+				}
+			}
+			if ((data->type == QSEECOM_CLIENT_APP &&
+				(data->client.app_arch == ELFCLASS32 ||
+				data->client.app_arch == ELFCLASS64)) ||
+				(data->type == QSEECOM_LISTENER_SERVICE)) {
+				update = (struct qseecom_sg_entry *)field;
+				for (j = 0; j < sg_ptr->nents; j++) {
+					/*
+					 * Check if sg list PA is under 4GB
+					 */
+					if ((qseecom.qsee_version >=
+						QSEE_VERSION_40) &&
+						(!cleanup) &&
+						((uint64_t)(sg_dma_address(sg))
+						>= PHY_ADDR_4G - sg->length)) {
+						pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
+							data->client.app_name,
+							&(sg_dma_address(sg)),
+							sg->length);
+						goto err;
+					}
+					update->phys_addr = cleanup ? 0 :
+						(uint32_t)sg_dma_address(sg);
+					update->len = cleanup ? 0 : sg->length;
+					update++;
+					len += sg->length;
+					sg = sg_next(sg);
+				}
+			} else {
+				pr_err("QSEE app arch %u is not supported\n",
+							data->client.app_arch);
+				goto err;
+			}
+		}
+
+		if (cleanup) {
+			ret = qseecom_dmabuf_cache_operations(dmabuf,
+					QSEECOM_CACHE_INVALIDATE);
+			if (ret) {
+				pr_err("cache operation failed %d\n", ret);
+				goto err;
+			}
+		} else {
+			ret = qseecom_dmabuf_cache_operations(dmabuf,
+					QSEECOM_CACHE_CLEAN);
+			if (ret) {
+				pr_err("cache operation failed %d\n", ret);
+				goto err;
+			}
+			if (data->type == QSEECOM_CLIENT_APP) {
+				offset = req->ifd_data[i].cmd_buf_offset;
+				data->sglistinfo_ptr[i].indexAndFlags =
+					SGLISTINFO_SET_INDEX_FLAG(
+					(sg_ptr->nents == 1), 0, offset);
+				data->sglistinfo_ptr[i].sizeOrCount =
+					(sg_ptr->nents == 1) ?
+					sg->length : sg_ptr->nents;
+				data->sglist_cnt = i + 1;
+			} else {
+				offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
+					+ (uintptr_t)lstnr_resp->resp_buf_ptr -
+					(uintptr_t)this_lstnr->sb_virt);
+				this_lstnr->sglistinfo_ptr[i].indexAndFlags =
+					SGLISTINFO_SET_INDEX_FLAG(
+					(sg_ptr->nents == 1), 0, offset);
+				this_lstnr->sglistinfo_ptr[i].sizeOrCount =
+					(sg_ptr->nents == 1) ?
+					sg->length : sg_ptr->nents;
+				this_lstnr->sglist_cnt = i + 1;
+			}
+		}
+		/* Deallocate the kbuf */
+		qseecom_dmabuf_unmap(sg_ptr, attach, dmabuf);
+		sg_ptr = NULL;
+		dmabuf = NULL;
+		attach = NULL;
+	}
+	return ret;
+err:
+	if (!IS_ERR_OR_NULL(sg_ptr))
+		qseecom_dmabuf_unmap(sg_ptr, attach, dmabuf);
+	return -ENOMEM;
+}
+
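+/*
+ * When a dma-buf has more scatter-gather entries than fit in the
+ * command buffer, allocate a coherent buffer holding the full 64-bit
+ * SG entry list and reference it from a version-2 list header written
+ * at the original offset.
+ */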
+static int __qseecom_allocate_sg_list_buffer(struct qseecom_dev_handle *data,
+		char *field, uint32_t fd_idx, struct sg_table *sg_ptr)
+{
+	struct scatterlist *sg = sg_ptr->sgl;
+	struct qseecom_sg_entry_64bit *sg_entry;
+	struct qseecom_sg_list_buf_hdr_64bit *buf_hdr;
+	void *buf;
+	uint i;
+	size_t size;
+	dma_addr_t coh_pmem;
+
+	if (fd_idx >= MAX_ION_FD) {
+		pr_err("fd_idx [%d] is invalid\n", fd_idx);
+		return -ENOMEM;
+	}
+	buf_hdr = (struct qseecom_sg_list_buf_hdr_64bit *)field;
+	memset((void *)buf_hdr, 0, QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT);
+	/* Allocate a contiguous kernel buffer */
+	size = sg_ptr->nents * SG_ENTRY_SZ_64BIT;
+	size = (size + PAGE_SIZE) & PAGE_MASK;
+	buf = dma_alloc_coherent(qseecom.dev,
+			size, &coh_pmem, GFP_KERNEL);
+	if (buf == NULL) {
+		pr_err("failed to alloc memory for sg buf\n");
+		return -ENOMEM;
+	}
+	/* update qseecom_sg_list_buf_hdr_64bit */
+	buf_hdr->version = QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2;
+	buf_hdr->new_buf_phys_addr = coh_pmem;
+	buf_hdr->nents_total = sg_ptr->nents;
+	/* save the left sg entries into new allocated buf */
+	sg_entry = (struct qseecom_sg_entry_64bit *)buf;
+	for (i = 0; i < sg_ptr->nents; i++) {
+		sg_entry->phys_addr = (uint64_t)sg_dma_address(sg);
+		sg_entry->len = sg->length;
+		sg_entry++;
+		sg = sg_next(sg);
+	}
+
+	data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
+	data->client.sec_buf_fd[fd_idx].vbase = buf;
+	data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
+	data->client.sec_buf_fd[fd_idx].size = size;
+
+	return 0;
+}
+
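+/*
+ * 64-bit variant of __qseecom_update_cmd_buf(): physical addresses and
+ * SG entries are written in 64-bit form, and oversized SG lists are
+ * spilled into a separately allocated buffer via
+ * __qseecom_allocate_sg_list_buffer().
+ */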
+static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
+			struct qseecom_dev_handle *data)
+{
+	char *field;
+	int ret = 0;
+	int i = 0;
+	uint32_t len = 0;
+	struct scatterlist *sg;
+	struct qseecom_send_modfd_cmd_req *req = NULL;
+	struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
+	struct qseecom_registered_listener_list *this_lstnr = NULL;
+	uint32_t offset;
+	struct sg_table *sg_ptr;
+	int ion_fd = -1;
+	struct dma_buf *dmabuf = NULL;
+	struct dma_buf_attachment *attach = NULL;
+
+	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
+			(data->type != QSEECOM_CLIENT_APP))
+		return -EFAULT;
+
+	if (msg == NULL) {
+		pr_err("Invalid address\n");
+		return -EINVAL;
+	}
+	if (data->type == QSEECOM_LISTENER_SERVICE) {
+		lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
+		this_lstnr = __qseecom_find_svc(data->listener.id);
+		if (IS_ERR_OR_NULL(this_lstnr)) {
+			pr_err("Invalid listener ID\n");
+			return -ENOMEM;
+		}
+	} else {
+		req = (struct qseecom_send_modfd_cmd_req *)msg;
+	}
+
+	for (i = 0; i < MAX_ION_FD; i++) {
+		if ((data->type != QSEECOM_LISTENER_SERVICE) &&
+						(req->ifd_data[i].fd > 0)) {
+			ion_fd = req->ifd_data[i].fd;
+			field = (char *) req->cmd_req_buf +
+				req->ifd_data[i].cmd_buf_offset;
+		} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
+				(lstnr_resp->ifd_data[i].fd > 0)) {
+			ion_fd = lstnr_resp->ifd_data[i].fd;
+			field = lstnr_resp->resp_buf_ptr +
+				lstnr_resp->ifd_data[i].cmd_buf_offset;
+		} else {
+			continue;
+		}
+		/* Populate the cmd data structure with the phys_addr */
+		ret = qseecom_dmabuf_map(ion_fd, &sg_ptr, &attach, &dmabuf);
+		if (ret) {
+			pr_err("Ion client could not retrieve sg table\n");
+			goto err;
+		}
+		if (sg_ptr->nents == 0) {
+			pr_err("Num of scattered entries is 0\n");
+			goto err;
+		}
+		if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
+			pr_warn("Num of scattered entries (%d) is greater than %d\n",
+				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
+			if (cleanup) {
+				if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
+					data->client.sec_buf_fd[i].vbase)
+					dma_free_coherent(qseecom.dev,
+					data->client.sec_buf_fd[i].size,
+					data->client.sec_buf_fd[i].vbase,
+					data->client.sec_buf_fd[i].pbase);
+			} else {
+				ret = __qseecom_allocate_sg_list_buffer(data,
+						field, i, sg_ptr);
+				if (ret) {
+					pr_err("Failed to allocate sg list buffer\n");
+					goto err;
+				}
+			}
+			len = QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT;
+			sg = sg_ptr->sgl;
+			goto cleanup;
+		}
+		sg = sg_ptr->sgl;
+		if (sg_ptr->nents == 1) {
+			uint64_t *update_64bit;
+
+			if (__boundary_checks_offset(req, lstnr_resp, data, i))
+				goto err;
+			/* 64bit app uses 64bit address */
+			update_64bit = (uint64_t *) field;
+			*update_64bit = cleanup ? 0 :
+					(uint64_t)sg_dma_address(sg_ptr->sgl);
+			len += (uint32_t)sg->length;
+		} else {
+			struct qseecom_sg_entry_64bit *update_64bit;
+			int j = 0;
+
+			if ((data->type != QSEECOM_LISTENER_SERVICE) &&
+					(req->ifd_data[i].fd > 0)) {
+
+				if ((req->cmd_req_len <
+					 SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
+					(req->ifd_data[i].cmd_buf_offset >
+					(req->cmd_req_len -
+					SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
+					pr_err("Invalid offset = 0x%x\n",
+					req->ifd_data[i].cmd_buf_offset);
+					goto err;
+				}
+
+			} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
+					(lstnr_resp->ifd_data[i].fd > 0)) {
+
+				if ((lstnr_resp->resp_len <
+					SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
+				(lstnr_resp->ifd_data[i].cmd_buf_offset >
+						(lstnr_resp->resp_len -
+					SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
+					goto err;
+				}
+			}
+			/* 64bit app uses 64bit address */
+			update_64bit = (struct qseecom_sg_entry_64bit *)field;
+			for (j = 0; j < sg_ptr->nents; j++) {
+				update_64bit->phys_addr = cleanup ? 0 :
+					(uint64_t)sg_dma_address(sg);
+				update_64bit->len = cleanup ? 0 :
+						(uint32_t)sg->length;
+				update_64bit++;
+				len += sg->length;
+				sg = sg_next(sg);
+			}
+		}
+cleanup:
+		if (cleanup) {
+			ret = qseecom_dmabuf_cache_operations(dmabuf,
+					QSEECOM_CACHE_INVALIDATE);
+			if (ret) {
+				pr_err("cache operation failed %d\n", ret);
+				goto err;
+			}
+		} else {
+			ret = qseecom_dmabuf_cache_operations(dmabuf,
+					QSEECOM_CACHE_CLEAN);
+			if (ret) {
+				pr_err("cache operation failed %d\n", ret);
+				goto err;
+			}
+			if (data->type == QSEECOM_CLIENT_APP) {
+				offset = req->ifd_data[i].cmd_buf_offset;
+				data->sglistinfo_ptr[i].indexAndFlags =
+					SGLISTINFO_SET_INDEX_FLAG(
+					(sg_ptr->nents == 1), 1, offset);
+				data->sglistinfo_ptr[i].sizeOrCount =
+					(sg_ptr->nents == 1) ?
+					sg->length : sg_ptr->nents;
+				data->sglist_cnt = i + 1;
+			} else {
+				offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
+					+ (uintptr_t)lstnr_resp->resp_buf_ptr -
+					(uintptr_t)this_lstnr->sb_virt);
+				this_lstnr->sglistinfo_ptr[i].indexAndFlags =
+					SGLISTINFO_SET_INDEX_FLAG(
+					(sg_ptr->nents == 1), 1, offset);
+				this_lstnr->sglistinfo_ptr[i].sizeOrCount =
+					(sg_ptr->nents == 1) ?
+					sg->length : sg_ptr->nents;
+				this_lstnr->sglist_cnt = i + 1;
+			}
+		}
+		/* unmap the dmabuf */
+		qseecom_dmabuf_unmap(sg_ptr, attach, dmabuf);
+		sg_ptr = NULL;
+		dmabuf = NULL;
+		attach = NULL;
+	}
+	return ret;
+err:
+	for (i = 0; i < MAX_ION_FD; i++)
+		if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
+			data->client.sec_buf_fd[i].vbase)
+			dma_free_coherent(qseecom.dev,
+				data->client.sec_buf_fd[i].size,
+				data->client.sec_buf_fd[i].vbase,
+				data->client.sec_buf_fd[i].pbase);
+	if (!IS_ERR_OR_NULL(sg_ptr))
+		qseecom_dmabuf_unmap(sg_ptr, attach, dmabuf);
+	return -ENOMEM;
+}
+
+static int __qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
+					void __user *argp,
+					bool is_64bit_addr)
+{
+	int ret = 0;
+	int i;
+	struct qseecom_send_modfd_cmd_req req;
+	struct qseecom_send_cmd_req send_cmd_req;
+
+	ret = copy_from_user(&req, argp, sizeof(req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+
+	send_cmd_req.cmd_req_buf = req.cmd_req_buf;
+	send_cmd_req.cmd_req_len = req.cmd_req_len;
+	send_cmd_req.resp_buf = req.resp_buf;
+	send_cmd_req.resp_len = req.resp_len;
+
+	if (__validate_send_cmd_inputs(data, &send_cmd_req))
+		return -EINVAL;
+
+	/* validate offsets */
+	for (i = 0; i < MAX_ION_FD; i++) {
+		if (req.ifd_data[i].cmd_buf_offset >= req.cmd_req_len) {
+			pr_err("Invalid offset %d = 0x%x\n",
+				i, req.ifd_data[i].cmd_buf_offset);
+			return -EINVAL;
+		}
+	}
+	req.cmd_req_buf = (void *)__qseecom_uvirt_to_kvirt(data,
+						(uintptr_t)req.cmd_req_buf);
+	req.resp_buf = (void *)__qseecom_uvirt_to_kvirt(data,
+						(uintptr_t)req.resp_buf);
+
+	if (!is_64bit_addr) {
+		ret = __qseecom_update_cmd_buf(&req, false, data);
+		if (ret)
+			return ret;
+		ret = __qseecom_send_cmd(data, &send_cmd_req);
+		if (ret)
+			return ret;
+		ret = __qseecom_update_cmd_buf(&req, true, data);
+		if (ret)
+			return ret;
+	} else {
+		ret = __qseecom_update_cmd_buf_64(&req, false, data);
+		if (ret)
+			return ret;
+		ret = __qseecom_send_cmd(data, &send_cmd_req);
+		if (ret)
+			return ret;
+		ret = __qseecom_update_cmd_buf_64(&req, true, data);
+		if (ret)
+			return ret;
+	}
+
+	return ret;
+}
+
+static int qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
+					void __user *argp)
+{
+	return __qseecom_send_modfd_cmd(data, argp, false);
+}
+
+static int qseecom_send_modfd_cmd_64(struct qseecom_dev_handle *data,
+					void __user *argp)
+{
+	return __qseecom_send_modfd_cmd(data, argp, true);
+}
+
+static int __qseecom_listener_has_rcvd_req(struct qseecom_dev_handle *data,
+		struct qseecom_registered_listener_list *svc)
+{
+	int ret;
+
+	ret = (svc->rcv_req_flag == 1);
+	return ret || data->abort;
+}
+
+static int qseecom_receive_req(struct qseecom_dev_handle *data)
+{
+	int ret = 0;
+	struct qseecom_registered_listener_list *this_lstnr;
+
+	mutex_lock(&listener_access_lock);
+	this_lstnr = __qseecom_find_svc(data->listener.id);
+	if (!this_lstnr) {
+		pr_err("Invalid listener ID\n");
+		mutex_unlock(&listener_access_lock);
+		return -ENODATA;
+	}
+	mutex_unlock(&listener_access_lock);
+
+	while (1) {
+		if (wait_event_freezable(this_lstnr->rcv_req_wq,
+				__qseecom_listener_has_rcvd_req(data,
+				this_lstnr))) {
+			pr_debug("Interrupted: exiting Listener Service = %d\n",
+						(uint32_t)data->listener.id);
+			/* woken up for different reason */
+			return -ERESTARTSYS;
+		}
+
+		if (data->abort) {
+			pr_err("Aborting Listener Service = %d\n",
+					(uint32_t)data->listener.id);
+			return -ENODEV;
+		}
+		mutex_lock(&listener_access_lock);
+		this_lstnr->rcv_req_flag = 0;
+		mutex_unlock(&listener_access_lock);
+		break;
+	}
+	return ret;
+}
+
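+/*
+ * Basic sanity checks on a TA .mdt image: ELF magic, supported class
+ * (32/64-bit), at least one program header, and program headers that
+ * fit within the firmware blob.
+ */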
+static bool __qseecom_is_fw_image_valid(const struct firmware *fw_entry)
+{
+	unsigned char app_arch = 0;
+	struct elf32_hdr *ehdr;
+	struct elf64_hdr *ehdr64;
+
+	app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
+
+	switch (app_arch) {
+	case ELFCLASS32: {
+		ehdr = (struct elf32_hdr *)fw_entry->data;
+		if (fw_entry->size < sizeof(*ehdr)) {
+			pr_err("%s: Not big enough to be an elf32 header\n",
+					 qseecom.pdev->init_name);
+			return false;
+		}
+		if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
+			pr_err("%s: Not an elf32 header\n",
+					 qseecom.pdev->init_name);
+			return false;
+		}
+		if (ehdr->e_phnum == 0) {
+			pr_err("%s: No loadable segments\n",
+					 qseecom.pdev->init_name);
+			return false;
+		}
+		if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
+		    sizeof(struct elf32_hdr) > fw_entry->size) {
+			pr_err("%s: Program headers not within mdt\n",
+					 qseecom.pdev->init_name);
+			return false;
+		}
+		break;
+	}
+	case ELFCLASS64: {
+		ehdr64 = (struct elf64_hdr *)fw_entry->data;
+		if (fw_entry->size < sizeof(*ehdr64)) {
+			pr_err("%s: Not big enough to be an elf64 header\n",
+					 qseecom.pdev->init_name);
+			return false;
+		}
+		if (memcmp(ehdr64->e_ident, ELFMAG, SELFMAG)) {
+			pr_err("%s: Not an elf64 header\n",
+					 qseecom.pdev->init_name);
+			return false;
+		}
+		if (ehdr64->e_phnum == 0) {
+			pr_err("%s: No loadable segments\n",
+					 qseecom.pdev->init_name);
+			return false;
+		}
+		if (sizeof(struct elf64_phdr) * ehdr64->e_phnum +
+		    sizeof(struct elf64_hdr) > fw_entry->size) {
+			pr_err("%s: Program headers not within mdt\n",
+					 qseecom.pdev->init_name);
+			return false;
+		}
+		break;
+	}
+	default: {
+		pr_err("QSEE app arch %u is not supported\n", app_arch);
+		return false;
+	}
+	}
+	return true;
+}
+
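+/*
+ * Determine the total size of a split TA image (<app>.mdt plus the
+ * <app>.bNN blobs) and its ELF class, without copying any image data.
+ */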
+static int __qseecom_get_fw_size(const char *appname, uint32_t *fw_size,
+					uint32_t *app_arch)
+{
+	int ret = -1;
+	int i = 0, rc = 0;
+	const struct firmware *fw_entry = NULL;
+	char fw_name[MAX_APP_NAME_SIZE];
+	struct elf32_hdr *ehdr;
+	struct elf64_hdr *ehdr64;
+	int num_images = 0;
+
+	snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
+	rc = request_firmware(&fw_entry, fw_name,  qseecom.dev);
+	if (rc) {
+		pr_err("error with request_firmware\n");
+		ret = -EIO;
+		goto err;
+	}
+	if (!__qseecom_is_fw_image_valid(fw_entry)) {
+		ret = -EIO;
+		goto err;
+	}
+	*app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
+	*fw_size = fw_entry->size;
+	if (*app_arch == ELFCLASS32) {
+		ehdr = (struct elf32_hdr *)fw_entry->data;
+		num_images = ehdr->e_phnum;
+	} else if (*app_arch == ELFCLASS64) {
+		ehdr64 = (struct elf64_hdr *)fw_entry->data;
+		num_images = ehdr64->e_phnum;
+	} else {
+		pr_err("QSEE %s app, arch %u is not supported\n",
+						appname, *app_arch);
+		ret = -EIO;
+		goto err;
+	}
+	pr_debug("QSEE %s app, arch %u\n", appname, *app_arch);
+	release_firmware(fw_entry);
+	fw_entry = NULL;
+	for (i = 0; i < num_images; i++) {
+		memset(fw_name, 0, sizeof(fw_name));
+		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
+		ret = request_firmware(&fw_entry, fw_name, qseecom.dev);
+		if (ret)
+			goto err;
+		if (*fw_size > U32_MAX - fw_entry->size) {
+			pr_err("QSEE %s app file size overflow\n", appname);
+			ret = -EINVAL;
+			goto err;
+		}
+		*fw_size += fw_entry->size;
+		release_firmware(fw_entry);
+		fw_entry = NULL;
+	}
+
+	return ret;
+err:
+	if (fw_entry)
+		release_firmware(fw_entry);
+	*fw_size = 0;
+	return ret;
+}
+
+static int __qseecom_get_fw_data(const char *appname, u8 *img_data,
+				uint32_t fw_size,
+				struct qseecom_load_app_ireq *load_req)
+{
+	int ret = -1;
+	int i = 0, rc = 0;
+	const struct firmware *fw_entry = NULL;
+	char fw_name[MAX_APP_NAME_SIZE];
+	u8 *img_data_ptr = img_data;
+	struct elf32_hdr *ehdr;
+	struct elf64_hdr *ehdr64;
+	int num_images = 0;
+	unsigned char app_arch = 0;
+
+	snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
+	rc = request_firmware(&fw_entry, fw_name,  qseecom.dev);
+	if (rc) {
+		ret = -EIO;
+		goto err;
+	}
+
+	load_req->img_len = fw_entry->size;
+	if (load_req->img_len > fw_size) {
+		pr_err("app %s size %zu is larger than buf size %u\n",
+			appname, fw_entry->size, fw_size);
+		ret = -EINVAL;
+		goto err;
+	}
+	memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
+	img_data_ptr = img_data_ptr + fw_entry->size;
+	load_req->mdt_len = fw_entry->size; /*Get MDT LEN*/
+
+	app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
+	if (app_arch == ELFCLASS32) {
+		ehdr = (struct elf32_hdr *)fw_entry->data;
+		num_images = ehdr->e_phnum;
+	} else if (app_arch == ELFCLASS64) {
+		ehdr64 = (struct elf64_hdr *)fw_entry->data;
+		num_images = ehdr64->e_phnum;
+	} else {
+		pr_err("QSEE %s app, arch %u is not supported\n",
+						appname, app_arch);
+		ret = -EIO;
+		goto err;
+	}
+	release_firmware(fw_entry);
+	fw_entry = NULL;
+	for (i = 0; i < num_images; i++) {
+		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
+		ret = request_firmware(&fw_entry, fw_name,  qseecom.dev);
+		if (ret) {
+			pr_err("Failed to locate blob %s\n", fw_name);
+			goto err;
+		}
+		if ((fw_entry->size > U32_MAX - load_req->img_len) ||
+			(fw_entry->size + load_req->img_len > fw_size)) {
+			pr_err("Invalid file size for %s\n", fw_name);
+			ret = -EINVAL;
+			goto err;
+		}
+		memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
+		img_data_ptr = img_data_ptr + fw_entry->size;
+		load_req->img_len += fw_entry->size;
+		release_firmware(fw_entry);
+		fw_entry = NULL;
+	}
+	return ret;
+err:
+	release_firmware(fw_entry);
+	return ret;
+}
+
+static int __qseecom_alloc_coherent_buf(
+			uint32_t size, u8 **vaddr, phys_addr_t *paddr)
+{
+	dma_addr_t coh_pmem;
+	void *buf = NULL;
+
+	/* Allocate a contiguous kernel buffer */
+	size = (size + PAGE_SIZE) & PAGE_MASK;
+	buf = dma_alloc_coherent(qseecom.dev,
+			size, &coh_pmem, GFP_KERNEL);
+	if (buf == NULL) {
+		pr_err("failed to alloc memory for size %d\n", size);
+		return -ENOMEM;
+	}
+	*vaddr = buf;
+	*paddr = coh_pmem;
+	return 0;
+}
+
+static void __qseecom_free_coherent_buf(uint32_t size,
+				u8 *vaddr, phys_addr_t paddr)
+{
+	size = (size + PAGE_SIZE) & PAGE_MASK;
+	dma_free_coherent(qseecom.dev, size, vaddr, paddr);
+}
+
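+/*
+ * Load a TA image (fetched via request_firmware()) into a coherent
+ * buffer, loading cmnlib/cmnlib64 first if needed, and issue the
+ * APP_START SCM call to TZ, returning the assigned app_id.
+ */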
+static int __qseecom_load_fw(struct qseecom_dev_handle *data, char *appname,
+				uint32_t *app_id)
+{
+	int ret = -1;
+	uint32_t fw_size = 0;
+	struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
+	struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
+	struct qseecom_command_scm_resp resp;
+	u8 *img_data = NULL;
+	phys_addr_t pa = 0;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+	uint32_t app_arch = 0;
+
+	if (!data || !appname || !app_id) {
+		pr_err("Null pointer to data or appname or appid\n");
+		return -EINVAL;
+	}
+	*app_id = 0;
+	if (__qseecom_get_fw_size(appname, &fw_size, &app_arch))
+		return -EIO;
+	data->client.app_arch = app_arch;
+
+	/* Check and load cmnlib */
+	if (qseecom.qsee_version > QSEEE_VERSION_00) {
+		if (!qseecom.commonlib_loaded && app_arch == ELFCLASS32) {
+			ret = qseecom_load_commonlib_image(data, "cmnlib");
+			if (ret) {
+				pr_err("failed to load cmnlib\n");
+				return -EIO;
+			}
+			qseecom.commonlib_loaded = true;
+			pr_debug("cmnlib is loaded\n");
+		}
+
+		if (!qseecom.commonlib64_loaded && app_arch == ELFCLASS64) {
+			ret = qseecom_load_commonlib_image(data, "cmnlib64");
+			if (ret) {
+				pr_err("failed to load cmnlib64\n");
+				return -EIO;
+			}
+			qseecom.commonlib64_loaded = true;
+			pr_debug("cmnlib64 is loaded\n");
+		}
+	}
+
+	ret = __qseecom_alloc_coherent_buf(fw_size, &img_data, &pa);
+	if (ret)
+		return ret;
+
+	ret = __qseecom_get_fw_data(appname, img_data, fw_size, &load_req);
+	if (ret) {
+		ret = -EIO;
+		goto exit_free_img_data;
+	}
+
+	/* Populate the load_req parameters */
+	if (qseecom.qsee_version < QSEE_VERSION_40) {
+		load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
+		strlcpy(load_req.app_name, appname, MAX_APP_NAME_SIZE);
+		load_req.phy_addr = (uint32_t)pa;
+		cmd_buf = (void *)&load_req;
+		cmd_len = sizeof(struct qseecom_load_app_ireq);
+	} else {
+		load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
+		load_req_64bit.mdt_len = load_req.mdt_len;
+		load_req_64bit.img_len = load_req.img_len;
+		strlcpy(load_req_64bit.app_name, appname, MAX_APP_NAME_SIZE);
+		load_req_64bit.phy_addr = (uint64_t)pa;
+		cmd_buf = (void *)&load_req_64bit;
+		cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
+	}
+
+	if (qseecom.support_bus_scaling) {
+		mutex_lock(&qsee_bw_mutex);
+		ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
+		mutex_unlock(&qsee_bw_mutex);
+		if (ret) {
+			ret = -EIO;
+			goto exit_free_img_data;
+		}
+	}
+
+	ret = __qseecom_enable_clk_scale_up(data);
+	if (ret) {
+		ret = -EIO;
+		goto exit_unregister_bus_bw_need;
+	}
+
+	/* SCM_CALL to load the image */
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
+			&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call to load failed : ret %d\n", ret);
+		ret = -EIO;
+		goto exit_disable_clk_vote;
+	}
+
+	switch (resp.result) {
+	case QSEOS_RESULT_SUCCESS:
+		*app_id = resp.data;
+		break;
+	case QSEOS_RESULT_INCOMPLETE:
+		ret = __qseecom_process_incomplete_cmd(data, &resp);
+		if (ret)
+			pr_err("process_incomplete_cmd FAILED\n");
+		else
+			*app_id = resp.data;
+		break;
+	case QSEOS_RESULT_FAILURE:
+		pr_err("scm call failed with response QSEOS_RESULT FAILURE\n");
+		break;
+	default:
+		pr_err("scm call returned unknown response %d\n", resp.result);
+		ret = -EINVAL;
+		break;
+	}
+
+exit_disable_clk_vote:
+	__qseecom_disable_clk_scale_down(data);
+
+exit_unregister_bus_bw_need:
+	if (qseecom.support_bus_scaling) {
+		mutex_lock(&qsee_bw_mutex);
+		qseecom_unregister_bus_bandwidth_needs(data);
+		mutex_unlock(&qsee_bw_mutex);
+	}
+
+exit_free_img_data:
+	if (img_data)
+		__qseecom_free_coherent_buf(fw_size, img_data, pa);
+	return ret;
+}
+
+static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
+					char *cmnlib_name)
+{
+	int ret = 0;
+	uint32_t fw_size = 0;
+	struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
+	struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
+	struct qseecom_command_scm_resp resp;
+	u8 *img_data = NULL;
+	phys_addr_t pa = 0;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+	uint32_t app_arch = 0;
+
+	if (!cmnlib_name) {
+		pr_err("cmnlib_name is NULL\n");
+		return -EINVAL;
+	}
+	if (strlen(cmnlib_name) >= MAX_APP_NAME_SIZE) {
+		pr_err("The cmnlib_name (%s) with length %zu is not valid\n",
+			cmnlib_name, strlen(cmnlib_name));
+		return -EINVAL;
+	}
+
+	if (__qseecom_get_fw_size(cmnlib_name, &fw_size, &app_arch))
+		return -EIO;
+
+	ret = __qseecom_alloc_coherent_buf(fw_size, &img_data, &pa);
+	if (ret)
+		return -EIO;
+
+	ret = __qseecom_get_fw_data(cmnlib_name, img_data, fw_size, &load_req);
+	if (ret) {
+		ret = -EIO;
+		goto exit_free_img_data;
+	}
+	if (qseecom.qsee_version < QSEE_VERSION_40) {
+		load_req.phy_addr = (uint32_t)pa;
+		load_req.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
+		cmd_buf = (void *)&load_req;
+		cmd_len = sizeof(struct qseecom_load_lib_image_ireq);
+	} else {
+		load_req_64bit.phy_addr = (uint64_t)pa;
+		load_req_64bit.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
+		load_req_64bit.img_len = load_req.img_len;
+		load_req_64bit.mdt_len = load_req.mdt_len;
+		cmd_buf = (void *)&load_req_64bit;
+		cmd_len = sizeof(struct qseecom_load_lib_image_64bit_ireq);
+	}
+
+	if (qseecom.support_bus_scaling) {
+		mutex_lock(&qsee_bw_mutex);
+		ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
+		mutex_unlock(&qsee_bw_mutex);
+		if (ret) {
+			ret = -EIO;
+			goto exit_free_img_data;
+		}
+	}
+
+	/* Vote for the SFPB clock */
+	ret = __qseecom_enable_clk_scale_up(data);
+	if (ret) {
+		ret = -EIO;
+		goto exit_unregister_bus_bw_need;
+	}
+
+	/* SCM_CALL to load the image */
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
+							&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call to load failed : ret %d\n", ret);
+		ret = -EIO;
+		goto exit_disable_clk_vote;
+	}
+
+	switch (resp.result) {
+	case QSEOS_RESULT_SUCCESS:
+		break;
+	case QSEOS_RESULT_FAILURE:
+		pr_err("scm call failed with response result %d\n", resp.result);
+		ret = -EINVAL;
+		goto exit_disable_clk_vote;
+	case  QSEOS_RESULT_INCOMPLETE:
+		ret = __qseecom_process_incomplete_cmd(data, &resp);
+		if (ret) {
+			pr_err("process_incomplete_cmd failed err: %d\n", ret);
+			goto exit_disable_clk_vote;
+		}
+		break;
+	default:
+		pr_err("scm call returned unknown response %d\n", resp.result);
+		ret = -EINVAL;
+		goto exit_disable_clk_vote;
+	}
+
+exit_disable_clk_vote:
+	__qseecom_disable_clk_scale_down(data);
+
+exit_unregister_bus_bw_need:
+	if (qseecom.support_bus_scaling) {
+		mutex_lock(&qsee_bw_mutex);
+		qseecom_unregister_bus_bandwidth_needs(data);
+		mutex_unlock(&qsee_bw_mutex);
+	}
+
+exit_free_img_data:
+	if (img_data)
+		__qseecom_free_coherent_buf(fw_size, img_data, pa);
+	return ret;
+}
+
+static int qseecom_unload_commonlib_image(void)
+{
+	int ret = -EINVAL;
+	struct qseecom_unload_lib_image_ireq unload_req = {0};
+	struct qseecom_command_scm_resp resp;
+
+	/* Populate the remaining parameters */
+	unload_req.qsee_cmd_id = QSEOS_UNLOAD_SERV_IMAGE_COMMAND;
+
+	/* SCM_CALL to load the image */
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &unload_req,
+			sizeof(struct qseecom_unload_lib_image_ireq),
+						&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call to unload lib failed : ret %d\n", ret);
+		ret = -EIO;
+	} else {
+		switch (resp.result) {
+		case QSEOS_RESULT_SUCCESS:
+			break;
+		case QSEOS_RESULT_FAILURE:
+			pr_err("scm fail resp.result QSEOS_RESULT FAILURE\n");
+			break;
+		default:
+			pr_err("scm call returned unknown response %d\n",
+					resp.result);
+			ret = -EINVAL;
+			break;
+		}
+	}
+
+	return ret;
+}
+
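+/*
+ * Kernel-client API: open a handle to a TA, loading it from firmware
+ * if it is not already registered, allocate a coherent shared buffer
+ * of the requested size for requests/responses, and track the handle
+ * on the registered kclient list.
+ */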
+int qseecom_start_app(struct qseecom_handle **handle,
+						char *app_name, uint32_t size)
+{
+	int32_t ret = 0;
+	unsigned long flags = 0;
+	struct qseecom_dev_handle *data = NULL;
+	struct qseecom_check_app_ireq app_ireq;
+	struct qseecom_registered_app_list *entry = NULL;
+	struct qseecom_registered_kclient_list *kclient_entry = NULL;
+	bool found_app = false;
+	phys_addr_t pa;
+	u8 *va = NULL;
+	uint32_t fw_size, app_arch;
+	uint32_t app_id = 0;
+
+	__qseecom_processing_pending_lsnr_unregister();
+
+	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
+		pr_err("Not allowed to be called in %d state\n",
+				atomic_read(&qseecom.qseecom_state));
+		return -EPERM;
+	}
+	if (!app_name) {
+		pr_err("failed to get the app name\n");
+		return -EINVAL;
+	}
+
+	if (strnlen(app_name, MAX_APP_NAME_SIZE) == MAX_APP_NAME_SIZE) {
+		pr_err("The app_name (%s) with length %zu is not valid\n",
+			app_name, strnlen(app_name, MAX_APP_NAME_SIZE));
+		return -EINVAL;
+	}
+
+	*handle = kzalloc(sizeof(struct qseecom_handle), GFP_KERNEL);
+	if (!(*handle))
+		return -ENOMEM;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data) {
+		kfree(*handle);
+		*handle = NULL;
+		return -ENOMEM;
+	}
+	data->abort = 0;
+	data->type = QSEECOM_CLIENT_APP;
+	data->released = false;
+	data->client.sb_length = size;
+	data->client.user_virt_sb_base = 0;
+
+	init_waitqueue_head(&data->abort_wq);
+
+	mutex_lock(&app_access_lock);
+
+	app_ireq.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
+	strlcpy(app_ireq.app_name, app_name, MAX_APP_NAME_SIZE);
+	ret = __qseecom_check_app_exists(app_ireq, &app_id);
+	if (ret)
+		goto err;
+
+	strlcpy(data->client.app_name, app_name, MAX_APP_NAME_SIZE);
+	if (app_id) {
+		pr_warn("App id %d for [%s] app exists\n", app_id,
+			(char *)app_ireq.app_name);
+		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+		list_for_each_entry(entry,
+				&qseecom.registered_app_list_head, list){
+			if (entry->app_id == app_id) {
+				if (entry->ref_cnt == U32_MAX) {
+					pr_err("App %d (%s) ref_cnt overflow\n",
+						app_id, app_ireq.app_name);
+					ret = -EINVAL;
+					goto err;
+				}
+				entry->ref_cnt++;
+				found_app = true;
+				break;
+			}
+		}
+		spin_unlock_irqrestore(
+				&qseecom.registered_app_list_lock, flags);
+		if (!found_app)
+			pr_warn("App_id %d [%s] was loaded but not registered\n",
+					app_id, (char *)app_ireq.app_name);
+	} else {
+		/* load the app and get the app_id  */
+		pr_debug("%s: Loading app for the first time\n",
+				qseecom.pdev->init_name);
+		ret = __qseecom_load_fw(data, app_name, &app_id);
+		if (ret < 0)
+			goto err;
+	}
+	data->client.app_id = app_id;
+	if (!found_app) {
+		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+		if (!entry) {
+			ret =  -ENOMEM;
+			goto err;
+		}
+		entry->app_id = app_id;
+		entry->ref_cnt = 1;
+		strlcpy(entry->app_name, app_name, MAX_APP_NAME_SIZE);
+		if (__qseecom_get_fw_size(app_name, &fw_size, &app_arch)) {
+			ret = -EIO;
+			kfree(entry);
+			goto err;
+		}
+		entry->app_arch = app_arch;
+		entry->app_blocked = false;
+		entry->blocked_on_listener_id = 0;
+		entry->check_block = 0;
+		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+		list_add_tail(&entry->list, &qseecom.registered_app_list_head);
+		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
+									flags);
+	}
+
+	/* Get the physical address of the req/resp buffer */
+	ret = __qseecom_alloc_coherent_buf(size, &va, &pa);
+	if (ret) {
+		pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
+			ret);
+		goto err;
+	}
+
+	/* Populate the structure for sending scm call to load image */
+	data->client.sb_virt = va;
+	data->client.user_virt_sb_base = (uintptr_t)data->client.sb_virt;
+	data->client.sb_phys = (phys_addr_t)pa;
+	(*handle)->dev = (void *)data;
+	(*handle)->sbuf = (unsigned char *)data->client.sb_virt;
+	(*handle)->sbuf_len = data->client.sb_length;
+
+	kclient_entry = kzalloc(sizeof(*kclient_entry), GFP_KERNEL);
+	if (!kclient_entry) {
+		ret = -ENOMEM;
+		goto err;
+	}
+	kclient_entry->handle = *handle;
+
+	spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
+	list_add_tail(&kclient_entry->list,
+			&qseecom.registered_kclient_list_head);
+	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
+
+	mutex_unlock(&app_access_lock);
+	return 0;
+
+err:
+	if (va)
+		__qseecom_free_coherent_buf(size, va, pa);
+	kfree(data);
+	kfree(*handle);
+	*handle = NULL;
+	mutex_unlock(&app_access_lock);
+	return ret;
+}
+EXPORT_SYMBOL(qseecom_start_app);
+
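+/*
+ * Tear down a handle created by qseecom_start_app(): remove it from the
+ * kernel-client list, unload (or drop a reference to) the TA, and free the
+ * shared buffer and handle memory on success.
+ */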
+int qseecom_shutdown_app(struct qseecom_handle **handle)
+{
+	int ret = -EINVAL;
+	struct qseecom_dev_handle *data;
+	struct qseecom_registered_kclient_list *kclient = NULL;
+	unsigned long flags = 0;
+	bool found_handle = false;
+
+	__qseecom_processing_pending_lsnr_unregister();
+
+	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
+		pr_err("Not allowed to be called in %d state\n",
+				atomic_read(&qseecom.qseecom_state));
+		return -EPERM;
+	}
+
+	if ((handle == NULL)  || (*handle == NULL)) {
+		pr_err("Handle is not initialized\n");
+		return -EINVAL;
+	}
+	data = (struct qseecom_dev_handle *) ((*handle)->dev);
+	mutex_lock(&app_access_lock);
+
+	spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
+	list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
+				list) {
+		if (kclient->handle == (*handle)) {
+			list_del(&kclient->list);
+			found_handle = true;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
+	if (!found_handle)
+		pr_err("Unable to find the handle, exiting\n");
+	else
+		ret = qseecom_unload_app(data, false);
+
+	mutex_unlock(&app_access_lock);
+	if (ret == 0) {
+		if (data->client.sb_virt)
+			__qseecom_free_coherent_buf(data->client.sb_length,
+				data->client.sb_virt, data->client.sb_phys);
+		kzfree(data);
+		kzfree(*handle);
+		kzfree(kclient);
+		*handle = NULL;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(qseecom_shutdown_app);
+
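+/*
+ * Send a command to the TA associated with 'handle'. Inputs are validated,
+ * bus bandwidth/crypto clocks are voted for where the platform requires it,
+ * and the request/response buffers are cache-flushed around the SCM call.
+ */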
+int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
+			uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len)
+{
+	int ret = 0;
+	struct qseecom_send_cmd_req req = {0, 0, 0, 0};
+	struct qseecom_dev_handle *data;
+	bool perf_enabled = false;
+
+	__qseecom_processing_pending_lsnr_unregister();
+
+	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
+		pr_err("Not allowed to be called in %d state\n",
+				atomic_read(&qseecom.qseecom_state));
+		return -EPERM;
+	}
+
+	if (handle == NULL) {
+		pr_err("Handle is not initialized\n");
+		return -EINVAL;
+	}
+	data = handle->dev;
+
+	req.cmd_req_len = sbuf_len;
+	req.resp_len = rbuf_len;
+	req.cmd_req_buf = send_buf;
+	req.resp_buf = resp_buf;
+
+	if (__validate_send_cmd_inputs(data, &req))
+		return -EINVAL;
+
+	mutex_lock(&app_access_lock);
+	if (qseecom.support_bus_scaling) {
+		ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
+		if (ret) {
+			pr_err("Failed to set bw.\n");
+			mutex_unlock(&app_access_lock);
+			return ret;
+		}
+	}
+	/*
+	 * On targets where the crypto clock is handled by HLOS,
+	 * if clk_access_cnt is zero and perf_enabled is false,
+	 * then the crypto clock was not enabled before sending the cmd
+	 * to TZ, so qseecom enables the clock here to avoid service failure.
+	 */
+	if (!qseecom.no_clock_support &&
+		!qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
+		pr_debug("ce clock is not enabled!\n");
+		ret = qseecom_perf_enable(data);
+		if (ret) {
+			pr_err("Failed to vote for clock with err %d\n",
+						ret);
+			mutex_unlock(&app_access_lock);
+			return -EINVAL;
+		}
+		perf_enabled = true;
+	}
+	if (!strcmp(data->client.app_name, "securemm"))
+		data->use_legacy_cmd = true;
+
+	dmac_flush_range(req.cmd_req_buf, req.cmd_req_buf + req.cmd_req_len);
+
+	ret = __qseecom_send_cmd(data, &req);
+
+	dmac_flush_range(req.resp_buf, req.resp_buf + req.resp_len);
+
+	data->use_legacy_cmd = false;
+	if (qseecom.support_bus_scaling)
+		__qseecom_add_bw_scale_down_timer(
+			QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
+
+	if (perf_enabled) {
+		qsee_disable_clock_vote(data, CLK_DFAB);
+		qsee_disable_clock_vote(data, CLK_SFPB);
+	}
+
+	mutex_unlock(&app_access_lock);
+
+	if (ret)
+		return ret;
+
+	pr_debug("sending cmd_req->rsp size: %u, ptr: 0x%pK\n",
+			req.resp_len, req.resp_buf);
+	return ret;
+}
+EXPORT_SYMBOL(qseecom_send_command);
+
+int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high)
+{
+	int ret = 0;
+
+	if ((handle == NULL) || (handle->dev == NULL)) {
+		pr_err("No valid kernel client\n");
+		return -EINVAL;
+	}
+	if (high) {
+		if (qseecom.support_bus_scaling) {
+			mutex_lock(&qsee_bw_mutex);
+			__qseecom_register_bus_bandwidth_needs(handle->dev,
+									HIGH);
+			mutex_unlock(&qsee_bw_mutex);
+		} else {
+			ret = qseecom_perf_enable(handle->dev);
+			if (ret)
+				pr_err("Failed to vote for clock with err %d\n",
+						ret);
+		}
+	} else {
+		if (!qseecom.support_bus_scaling) {
+			qsee_disable_clock_vote(handle->dev, CLK_DFAB);
+			qsee_disable_clock_vote(handle->dev, CLK_SFPB);
+		} else {
+			mutex_lock(&qsee_bw_mutex);
+			qseecom_unregister_bus_bandwidth_needs(handle->dev);
+			mutex_unlock(&qsee_bw_mutex);
+		}
+	}
+	return ret;
+}
+EXPORT_SYMBOL(qseecom_set_bandwidth);
+
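+/*
+ * Bridge for the smcinvoke driver: repackage an SCM return (result, session
+ * id, listener id) into a qseecom response, run the normal reentrancy or
+ * incomplete-command handling, and copy the updated values back into the
+ * scm_desc.
+ */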
+int qseecom_process_listener_from_smcinvoke(struct scm_desc *desc)
+{
+	struct qseecom_registered_app_list dummy_app_entry = { {0} };
+	struct qseecom_dev_handle dummy_private_data = {0};
+	struct qseecom_command_scm_resp resp;
+	int ret = 0;
+
+	if (!desc) {
+		pr_err("desc is NULL\n");
+		return -EINVAL;
+	}
+
+	resp.result = desc->ret[0];	/*req_cmd*/
+	resp.resp_type = desc->ret[1]; /*incomplete:unused;blocked:session_id*/
+	resp.data = desc->ret[2];	/*listener_id*/
+
+	dummy_private_data.client.app_id = desc->ret[1];
+	dummy_app_entry.app_id = desc->ret[1];
+
+	mutex_lock(&app_access_lock);
+	if (qseecom.qsee_reentrancy_support)
+		ret = __qseecom_process_reentrancy(&resp, &dummy_app_entry,
+					&dummy_private_data);
+	else
+		ret = __qseecom_process_incomplete_cmd(&dummy_private_data,
+					&resp);
+	mutex_unlock(&app_access_lock);
+	if (ret)
+		pr_err("Failed on cmd %d for lsnr %d session %d, ret = %d\n",
+			(int)desc->ret[0], (int)desc->ret[2],
+			(int)desc->ret[1], ret);
+	desc->ret[0] = resp.result;
+	desc->ret[1] = resp.resp_type;
+	desc->ret[2] = resp.data;
+	return ret;
+}
+EXPORT_SYMBOL(qseecom_process_listener_from_smcinvoke);
+
+static int qseecom_send_resp(void)
+{
+	qseecom.send_resp_flag = 1;
+	wake_up_interruptible(&qseecom.send_resp_wq);
+	return 0;
+}
+
+static int qseecom_reentrancy_send_resp(struct qseecom_dev_handle *data)
+{
+	struct qseecom_registered_listener_list *this_lstnr = NULL;
+
+	pr_debug("lstnr %d send resp, wakeup\n", data->listener.id);
+	this_lstnr = __qseecom_find_svc(data->listener.id);
+	if (this_lstnr == NULL)
+		return -EINVAL;
+	qseecom.send_resp_flag = 1;
+	this_lstnr->send_resp_flag = 1;
+	wake_up_interruptible(&qseecom.send_resp_wq);
+	return 0;
+}
+
+static int __validate_send_modfd_resp_inputs(struct qseecom_dev_handle *data,
+			struct qseecom_send_modfd_listener_resp *resp,
+			struct qseecom_registered_listener_list *this_lstnr)
+{
+	int i;
+
+	if (!data || !resp || !this_lstnr) {
+		pr_err("listener handle or resp msg is null\n");
+		return -EINVAL;
+	}
+
+	if (resp->resp_buf_ptr == NULL) {
+		pr_err("resp buffer is null\n");
+		return -EINVAL;
+	}
+	/* validate resp buf length */
+	if ((resp->resp_len == 0) ||
+			(resp->resp_len > this_lstnr->sb_length)) {
+		pr_err("resp buf length %d not valid\n", resp->resp_len);
+		return -EINVAL;
+	}
+
+	if ((uintptr_t)resp->resp_buf_ptr > (ULONG_MAX - resp->resp_len)) {
+		pr_err("Integer overflow in resp_len & resp_buf\n");
+		return -EINVAL;
+	}
+	if ((uintptr_t)this_lstnr->user_virt_sb_base >
+					(ULONG_MAX - this_lstnr->sb_length)) {
+		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
+		return -EINVAL;
+	}
+	/* validate resp buf */
+	if (((uintptr_t)resp->resp_buf_ptr <
+		(uintptr_t)this_lstnr->user_virt_sb_base) ||
+		((uintptr_t)resp->resp_buf_ptr >=
+		((uintptr_t)this_lstnr->user_virt_sb_base +
+				this_lstnr->sb_length)) ||
+		(((uintptr_t)resp->resp_buf_ptr + resp->resp_len) >
+		((uintptr_t)this_lstnr->user_virt_sb_base +
+						this_lstnr->sb_length))) {
+		pr_err("resp buf is out of shared buffer region\n");
+		return -EINVAL;
+	}
+
+	/* validate offsets */
+	for (i = 0; i < MAX_ION_FD; i++) {
+		if (resp->ifd_data[i].cmd_buf_offset >= resp->resp_len) {
+			pr_err("Invalid offset %d = 0x%x\n",
+				i, resp->ifd_data[i].cmd_buf_offset);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int __qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
+				void __user *argp, bool is_64bit_addr)
+{
+	struct qseecom_send_modfd_listener_resp resp;
+	struct qseecom_registered_listener_list *this_lstnr = NULL;
+
+	if (copy_from_user(&resp, argp, sizeof(resp))) {
+		pr_err("copy_from_user failed\n");
+		return -EINVAL;
+	}
+
+	this_lstnr = __qseecom_find_svc(data->listener.id);
+	if (this_lstnr == NULL)
+		return -EINVAL;
+
+	if (__validate_send_modfd_resp_inputs(data, &resp, this_lstnr))
+		return -EINVAL;
+
+	resp.resp_buf_ptr = this_lstnr->sb_virt +
+		(uintptr_t)(resp.resp_buf_ptr - this_lstnr->user_virt_sb_base);
+
+	if (!is_64bit_addr)
+		__qseecom_update_cmd_buf(&resp, false, data);
+	else
+		__qseecom_update_cmd_buf_64(&resp, false, data);
+	qseecom.send_resp_flag = 1;
+	this_lstnr->send_resp_flag = 1;
+	wake_up_interruptible(&qseecom.send_resp_wq);
+	return 0;
+}
+
+static int qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
+						void __user *argp)
+{
+	return __qseecom_send_modfd_resp(data, argp, false);
+}
+
+static int qseecom_send_modfd_resp_64(struct qseecom_dev_handle *data,
+						void __user *argp)
+{
+	return __qseecom_send_modfd_resp(data, argp, true);
+}
+
+static int qseecom_get_qseos_version(struct qseecom_dev_handle *data,
+						void __user *argp)
+{
+	struct qseecom_qseos_version_req req;
+
+	if (copy_from_user(&req, argp, sizeof(req))) {
+		pr_err("copy_from_user failed\n");
+		return -EINVAL;
+	}
+	req.qseos_version = qseecom.qseos_version;
+	if (copy_to_user(argp, &req, sizeof(req))) {
+		pr_err("copy_to_user failed\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
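+/*
+ * Reference-counted enable of the CE clocks (core, interface and bus) for
+ * the selected hardware instance; only the first caller actually turns the
+ * clocks on.
+ */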
+static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce)
+{
+	int rc = 0;
+	struct qseecom_clk *qclk = NULL;
+
+	if (qseecom.no_clock_support)
+		return 0;
+
+	if (ce == CLK_QSEE)
+		qclk = &qseecom.qsee;
+	if (ce == CLK_CE_DRV)
+		qclk = &qseecom.ce_drv;
+
+	if (qclk == NULL) {
+		pr_err("CLK type not supported\n");
+		return -EINVAL;
+	}
+	mutex_lock(&clk_access_lock);
+
+	if (qclk->clk_access_cnt == ULONG_MAX) {
+		pr_err("clk_access_cnt beyond limitation\n");
+		goto err;
+	}
+	if (qclk->clk_access_cnt > 0) {
+		qclk->clk_access_cnt++;
+		mutex_unlock(&clk_access_lock);
+		return rc;
+	}
+
+	/* Enable CE core clk */
+	if (qclk->ce_core_clk != NULL) {
+		rc = clk_prepare_enable(qclk->ce_core_clk);
+		if (rc) {
+			pr_err("Unable to enable/prepare CE core clk\n");
+			goto err;
+		}
+	}
+	/* Enable CE clk */
+	if (qclk->ce_clk != NULL) {
+		rc = clk_prepare_enable(qclk->ce_clk);
+		if (rc) {
+			pr_err("Unable to enable/prepare CE iface clk\n");
+			goto ce_clk_err;
+		}
+	}
+	/* Enable AXI clk */
+	if (qclk->ce_bus_clk != NULL) {
+		rc = clk_prepare_enable(qclk->ce_bus_clk);
+		if (rc) {
+			pr_err("Unable to enable/prepare CE bus clk\n");
+			goto ce_bus_clk_err;
+		}
+	}
+	qclk->clk_access_cnt++;
+	mutex_unlock(&clk_access_lock);
+	return 0;
+
+ce_bus_clk_err:
+	if (qclk->ce_clk != NULL)
+		clk_disable_unprepare(qclk->ce_clk);
+ce_clk_err:
+	if (qclk->ce_core_clk != NULL)
+		clk_disable_unprepare(qclk->ce_core_clk);
+err:
+	mutex_unlock(&clk_access_lock);
+	return -EIO;
+}
+
+static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce)
+{
+	struct qseecom_clk *qclk;
+
+	if (qseecom.no_clock_support)
+		return;
+
+	if (ce == CLK_QSEE)
+		qclk = &qseecom.qsee;
+	else
+		qclk = &qseecom.ce_drv;
+
+	mutex_lock(&clk_access_lock);
+
+	if (qclk->clk_access_cnt == 0) {
+		mutex_unlock(&clk_access_lock);
+		return;
+	}
+
+	if (qclk->clk_access_cnt == 1) {
+		if (qclk->ce_clk != NULL)
+			clk_disable_unprepare(qclk->ce_clk);
+		if (qclk->ce_core_clk != NULL)
+			clk_disable_unprepare(qclk->ce_core_clk);
+		if (qclk->ce_bus_clk != NULL)
+			clk_disable_unprepare(qclk->ce_bus_clk);
+	}
+	qclk->clk_access_cnt--;
+	mutex_unlock(&clk_access_lock);
+}
+
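+/*
+ * Vote for QSEE bus bandwidth. The msm_bus vote index encodes which clients
+ * are active: 0 = none, 1 = DFAB only, 2 = SFPB only, 3 = both, mirroring
+ * the request values used in qsee_disable_clock_vote() below.
+ */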
+static int qsee_vote_for_clock(struct qseecom_dev_handle *data,
+						int32_t clk_type)
+{
+	int ret = 0;
+	struct qseecom_clk *qclk;
+
+	if (qseecom.no_clock_support)
+		return 0;
+
+	qclk = &qseecom.qsee;
+	if (!qseecom.qsee_perf_client)
+		return ret;
+
+	switch (clk_type) {
+	case CLK_DFAB:
+		mutex_lock(&qsee_bw_mutex);
+		if (!qseecom.qsee_bw_count) {
+			if (qseecom.qsee_sfpb_bw_count > 0)
+				ret = msm_bus_scale_client_update_request(
+					qseecom.qsee_perf_client, 3);
+			else {
+				if (qclk->ce_core_src_clk != NULL)
+					ret = __qseecom_enable_clk(CLK_QSEE);
+				if (!ret) {
+					ret =
+					msm_bus_scale_client_update_request(
+						qseecom.qsee_perf_client, 1);
+					if ((ret) &&
+						(qclk->ce_core_src_clk != NULL))
+						__qseecom_disable_clk(CLK_QSEE);
+				}
+			}
+			if (ret)
+				pr_err("DFAB Bandwidth req failed (%d)\n",
+								ret);
+			else {
+				qseecom.qsee_bw_count++;
+				data->perf_enabled = true;
+			}
+		} else {
+			qseecom.qsee_bw_count++;
+			data->perf_enabled = true;
+		}
+		mutex_unlock(&qsee_bw_mutex);
+		break;
+	case CLK_SFPB:
+		mutex_lock(&qsee_bw_mutex);
+		if (!qseecom.qsee_sfpb_bw_count) {
+			if (qseecom.qsee_bw_count > 0)
+				ret = msm_bus_scale_client_update_request(
+					qseecom.qsee_perf_client, 3);
+			else {
+				if (qclk->ce_core_src_clk != NULL)
+					ret = __qseecom_enable_clk(CLK_QSEE);
+				if (!ret) {
+					ret =
+					msm_bus_scale_client_update_request(
+						qseecom.qsee_perf_client, 2);
+					if ((ret) &&
+						(qclk->ce_core_src_clk != NULL))
+						__qseecom_disable_clk(CLK_QSEE);
+				}
+			}
+
+			if (ret)
+				pr_err("SFPB Bandwidth req failed (%d)\n",
+								ret);
+			else {
+				qseecom.qsee_sfpb_bw_count++;
+				data->fast_load_enabled = true;
+			}
+		} else {
+			qseecom.qsee_sfpb_bw_count++;
+			data->fast_load_enabled = true;
+		}
+		mutex_unlock(&qsee_bw_mutex);
+		break;
+	default:
+		pr_err("Clock type not defined\n");
+		break;
+	}
+	return ret;
+}
+
+static void qsee_disable_clock_vote(struct qseecom_dev_handle *data,
+						int32_t clk_type)
+{
+	int32_t ret = 0;
+	struct qseecom_clk *qclk;
+
+	qclk = &qseecom.qsee;
+
+	if (qseecom.no_clock_support)
+		return;
+	if (!qseecom.qsee_perf_client)
+		return;
+
+	switch (clk_type) {
+	case CLK_DFAB:
+		mutex_lock(&qsee_bw_mutex);
+		if (qseecom.qsee_bw_count == 0) {
+			pr_err("Client error. Extra call to disable DFAB clk\n");
+			mutex_unlock(&qsee_bw_mutex);
+			return;
+		}
+
+		if (qseecom.qsee_bw_count == 1) {
+			if (qseecom.qsee_sfpb_bw_count > 0)
+				ret = msm_bus_scale_client_update_request(
+					qseecom.qsee_perf_client, 2);
+			else {
+				ret = msm_bus_scale_client_update_request(
+						qseecom.qsee_perf_client, 0);
+				if ((!ret) && (qclk->ce_core_src_clk != NULL))
+					__qseecom_disable_clk(CLK_QSEE);
+			}
+			if (ret)
+				pr_err("DFAB Bandwidth req fail (%d)\n",
+								ret);
+			else {
+				qseecom.qsee_bw_count--;
+				data->perf_enabled = false;
+			}
+		} else {
+			qseecom.qsee_bw_count--;
+			data->perf_enabled = false;
+		}
+		mutex_unlock(&qsee_bw_mutex);
+		break;
+	case CLK_SFPB:
+		mutex_lock(&qsee_bw_mutex);
+		if (qseecom.qsee_sfpb_bw_count == 0) {
+			pr_err("Client error. Extra call to disable SFPB clk\n");
+			mutex_unlock(&qsee_bw_mutex);
+			return;
+		}
+		if (qseecom.qsee_sfpb_bw_count == 1) {
+			if (qseecom.qsee_bw_count > 0)
+				ret = msm_bus_scale_client_update_request(
+						qseecom.qsee_perf_client, 1);
+			else {
+				ret = msm_bus_scale_client_update_request(
+						qseecom.qsee_perf_client, 0);
+				if ((!ret) && (qclk->ce_core_src_clk != NULL))
+					__qseecom_disable_clk(CLK_QSEE);
+			}
+			if (ret)
+				pr_err("SFPB Bandwidth req fail (%d)\n",
+								ret);
+			else {
+				qseecom.qsee_sfpb_bw_count--;
+				data->fast_load_enabled = false;
+			}
+		} else {
+			qseecom.qsee_sfpb_bw_count--;
+			data->fast_load_enabled = false;
+		}
+		mutex_unlock(&qsee_bw_mutex);
+		break;
+	default:
+		pr_err("Clock type not defined\n");
+		break;
+	}
+
+}
+
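+/*
+ * Load an external ELF image supplied through an ion/dma-buf fd: map the
+ * buffer, build the 32- or 64-bit load request, vote for clocks/bandwidth,
+ * clean the cache and issue QSEOS_LOAD_EXTERNAL_ELF_COMMAND to TZ.
+ */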
+static int qseecom_load_external_elf(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	struct qseecom_load_img_req load_img_req;
+	int uret = 0;
+	int ret = 0;
+	phys_addr_t pa = 0;
+	size_t len;
+	struct qseecom_load_app_ireq load_req;
+	struct qseecom_load_app_64bit_ireq load_req_64bit;
+	struct qseecom_command_scm_resp resp;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+	struct sg_table *sgt = NULL;
+	struct dma_buf_attachment *attach = NULL;
+	struct dma_buf *dmabuf = NULL;
+	void *va = NULL;
+
+	/* Copy the relevant information needed for loading the image */
+	if (copy_from_user(&load_img_req,
+				(void __user *)argp,
+				sizeof(struct qseecom_load_img_req))) {
+		pr_err("copy_from_user failed\n");
+		return -EFAULT;
+	}
+
+	/* Get the handle of the shared fd */
+	ret = qseecom_vaddr_map(load_img_req.ifd_data_fd, &pa, &va,
+					&sgt, &attach, &len, &dmabuf);
+	if (ret) {
+		pr_err("Failed to map vaddr for ion_fd %d\n",
+			load_img_req.ifd_data_fd);
+		return -ENOMEM;
+	}
+	if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
+		pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
+				len, load_img_req.mdt_len,
+				load_img_req.img_len);
+		ret = -EINVAL;
+		goto exit_cpu_restore;
+	}
+
+	/* Populate the structure for sending scm call to load image */
+	if (qseecom.qsee_version < QSEE_VERSION_40) {
+		load_req.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
+		load_req.mdt_len = load_img_req.mdt_len;
+		load_req.img_len = load_img_req.img_len;
+		load_req.phy_addr = (uint32_t)pa;
+		cmd_buf = (void *)&load_req;
+		cmd_len = sizeof(struct qseecom_load_app_ireq);
+	} else {
+		load_req_64bit.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
+		load_req_64bit.mdt_len = load_img_req.mdt_len;
+		load_req_64bit.img_len = load_img_req.img_len;
+		load_req_64bit.phy_addr = (uint64_t)pa;
+		cmd_buf = (void *)&load_req_64bit;
+		cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
+	}
+
+	if (qseecom.support_bus_scaling) {
+		mutex_lock(&qsee_bw_mutex);
+		ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
+		mutex_unlock(&qsee_bw_mutex);
+		if (ret) {
+			ret = -EIO;
+			goto exit_cpu_restore;
+		}
+	}
+
+	/* Vote for the SFPB clock */
+	ret = __qseecom_enable_clk_scale_up(data);
+	if (ret) {
+		ret = -EIO;
+		goto exit_register_bus_bandwidth_needs;
+	}
+	ret = qseecom_dmabuf_cache_operations(dmabuf,
+					QSEECOM_CACHE_CLEAN);
+	if (ret) {
+		pr_err("cache operation failed %d\n", ret);
+		goto exit_disable_clock;
+	}
+	/*  SCM_CALL to load the external elf */
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
+			&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call to load failed : ret %d\n",
+				ret);
+		ret = -EFAULT;
+		goto exit_disable_clock;
+	}
+
+	ret = qseecom_dmabuf_cache_operations(dmabuf,
+					QSEECOM_CACHE_INVALIDATE);
+	if (ret) {
+		pr_err("cache operation failed %d\n", ret);
+		goto exit_disable_clock;
+	}
+
+	switch (resp.result) {
+	case QSEOS_RESULT_SUCCESS:
+		break;
+	case QSEOS_RESULT_INCOMPLETE:
+		pr_err("%s: qseos result incomplete\n", __func__);
+		ret = __qseecom_process_incomplete_cmd(data, &resp);
+		if (ret)
+			pr_err("process_incomplete_cmd failed: err: %d\n", ret);
+		break;
+	case QSEOS_RESULT_FAILURE:
+		pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
+		ret = -EFAULT;
+		break;
+	default:
+		pr_err("scm_call response result %d not supported\n",
+							resp.result);
+		ret = -EFAULT;
+		break;
+	}
+
+exit_disable_clock:
+	__qseecom_disable_clk_scale_down(data);
+
+exit_register_bus_bandwidth_needs:
+	if (qseecom.support_bus_scaling) {
+		mutex_lock(&qsee_bw_mutex);
+		uret = qseecom_unregister_bus_bandwidth_needs(data);
+		mutex_unlock(&qsee_bw_mutex);
+		if (uret)
+			pr_err("Failed to unregister bus bw needs %d, scm_call ret %d\n",
+								uret, ret);
+	}
+
+exit_cpu_restore:
+	if (dmabuf)
+		qseecom_vaddr_unmap(va, sgt, attach, dmabuf);
+	return ret;
+}
+
+static int qseecom_unload_external_elf(struct qseecom_dev_handle *data)
+{
+	int ret = 0;
+	struct qseecom_command_scm_resp resp;
+	struct qseecom_unload_app_ireq req;
+
+	/* unavailable client app */
+	data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
+
+	/* Populate the structure for sending scm call to unload image */
+	req.qsee_cmd_id = QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND;
+
+	/* SCM_CALL to unload the external elf */
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
+			sizeof(struct qseecom_unload_app_ireq),
+			&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call to unload failed : ret %d\n",
+				ret);
+		ret = -EFAULT;
+		goto qseecom_unload_external_elf_scm_err;
+	}
+	if (resp.result == QSEOS_RESULT_INCOMPLETE) {
+		ret = __qseecom_process_incomplete_cmd(data, &resp);
+		if (ret)
+			pr_err("process_incomplete_cmd fail err: %d\n",
+					ret);
+	} else {
+		if (resp.result != QSEOS_RESULT_SUCCESS) {
+			pr_err("scm_call to unload image failed resp.result =%d\n",
+						resp.result);
+			ret = -EFAULT;
+		}
+	}
+
+qseecom_unload_external_elf_scm_err:
+	return ret;
+}
+
+static int qseecom_query_app_loaded(struct qseecom_dev_handle *data,
+					void __user *argp)
+{
+	int32_t ret = 0;
+	struct qseecom_qseos_app_load_query query_req = { {0} };
+	struct qseecom_check_app_ireq req;
+	struct qseecom_registered_app_list *entry = NULL;
+	unsigned long flags = 0;
+	uint32_t app_arch = 0, app_id = 0;
+	bool found_app = false;
+
+	/* Copy the relevant information needed for loading the image */
+	if (copy_from_user(&query_req, (void __user *)argp,
+				sizeof(struct qseecom_qseos_app_load_query))) {
+		pr_err("copy_from_user failed\n");
+		ret = -EFAULT;
+		goto exit_free;
+	}
+
+	req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
+	query_req.app_name[MAX_APP_NAME_SIZE-1] = '\0';
+	strlcpy(req.app_name, query_req.app_name, MAX_APP_NAME_SIZE);
+
+	ret = __qseecom_check_app_exists(req, &app_id);
+	if (ret) {
+		pr_err("scm call to check if app is loaded failed\n");
+		goto exit_free;
+	}
+	if (app_id) {
+		pr_debug("App id %d (%s) already exists\n", app_id,
+			(char *)(req.app_name));
+		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+		list_for_each_entry(entry,
+				&qseecom.registered_app_list_head, list){
+			if (entry->app_id == app_id) {
+				app_arch = entry->app_arch;
+				if (entry->ref_cnt == U32_MAX) {
+					pr_err("App %d (%s) ref_cnt overflow\n",
+			pr_err("scm fail resp.result QSEOS_RESULT_FAILURE\n");
+					ret = -EINVAL;
+					goto exit_free;
+				}
+				entry->ref_cnt++;
+				found_app = true;
+				break;
+			}
+		}
+		spin_unlock_irqrestore(
+				&qseecom.registered_app_list_lock, flags);
+		data->client.app_id = app_id;
+		query_req.app_id = app_id;
+		if (app_arch) {
+			data->client.app_arch = app_arch;
+			query_req.app_arch = app_arch;
+		} else {
+			data->client.app_arch = 0;
+			query_req.app_arch = 0;
+		}
+		strlcpy(data->client.app_name, query_req.app_name,
+				MAX_APP_NAME_SIZE);
+		/*
+		 * If app was loaded by appsbl before and was not registered,
+		 * register this app now.
+		 */
+		if (!found_app) {
+			pr_debug("Register app %d [%s] which was loaded before\n",
+					app_id, (char *)query_req.app_name);
+			entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+			if (!entry) {
+				pr_err("kmalloc for app entry failed\n");
+				ret = -ENOMEM;
+				goto exit_free;
+			}
+			entry->app_id = app_id;
+			entry->ref_cnt = 1;
+			entry->app_arch = data->client.app_arch;
+			strlcpy(entry->app_name, data->client.app_name,
+				MAX_APP_NAME_SIZE);
+			entry->app_blocked = false;
+			entry->blocked_on_listener_id = 0;
+			entry->check_block = 0;
+			spin_lock_irqsave(&qseecom.registered_app_list_lock,
+				flags);
+			list_add_tail(&entry->list,
+				&qseecom.registered_app_list_head);
+			spin_unlock_irqrestore(
+				&qseecom.registered_app_list_lock, flags);
+		}
+		if (copy_to_user(argp, &query_req, sizeof(query_req))) {
+			pr_err("copy_to_user failed\n");
+			ret = -EFAULT;
+			goto exit_free;
+		}
+		ret = -EEXIST;	/* app already loaded */
+		goto exit_free;
+	}
+
+exit_free:
+	return ret;	/* app not loaded */
+}
+
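+/*
+ * Look up the crypto-engine unit matching 'unit' for the given key usage
+ * and return its pipe pair plus the CE number of each of its pipe entries
+ * through *pipe and (*ce_hw)[].
+ */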
+static int __qseecom_get_ce_pipe_info(
+			enum qseecom_key_management_usage_type usage,
+			uint32_t *pipe, uint32_t **ce_hw, uint32_t unit)
+{
+	int ret = -EINVAL;
+	int i, j;
+	struct qseecom_ce_info_use *p = NULL;
+	int total = 0;
+	struct qseecom_ce_pipe_entry *pcepipe;
+
+	switch (usage) {
+	case QSEOS_KM_USAGE_DISK_ENCRYPTION:
+	case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
+	case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
+		if (qseecom.support_fde) {
+			p = qseecom.ce_info.fde;
+			total = qseecom.ce_info.num_fde;
+		} else {
+			pr_err("system does not support fde\n");
+			return -EINVAL;
+		}
+		break;
+	case QSEOS_KM_USAGE_FILE_ENCRYPTION:
+		if (qseecom.support_pfe) {
+			p = qseecom.ce_info.pfe;
+			total = qseecom.ce_info.num_pfe;
+		} else {
+			pr_err("system does not support pfe\n");
+			return -EINVAL;
+		}
+		break;
+	default:
+		pr_err("unsupported usage %d\n", usage);
+		return -EINVAL;
+	}
+
+	for (j = 0; j < total; j++) {
+		if (p->unit_num == unit) {
+			pcepipe =  p->ce_pipe_entry;
+			for (i = 0; i < p->num_ce_pipe_entries; i++) {
+				(*ce_hw)[i] = pcepipe->ce_num;
+				*pipe = pcepipe->ce_pipe_pair;
+				pcepipe++;
+			}
+			ret = 0;
+			break;
+		}
+		p++;
+	}
+	return ret;
+}
+
+static int __qseecom_generate_and_save_key(struct qseecom_dev_handle *data,
+			enum qseecom_key_management_usage_type usage,
+			struct qseecom_key_generate_ireq *ireq)
+{
+	struct qseecom_command_scm_resp resp;
+	int ret;
+
+	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
+		usage >= QSEOS_KM_USAGE_MAX) {
+		pr_err("Error: unsupported usage %d\n", usage);
+		return -EFAULT;
+	}
+	ret = __qseecom_enable_clk(CLK_QSEE);
+	if (ret)
+		return ret;
+
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+				ireq, sizeof(struct qseecom_key_generate_ireq),
+				&resp, sizeof(resp));
+	if (ret) {
+		if (ret == -EINVAL &&
+			resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
+			pr_debug("Key ID exists.\n");
+			ret = 0;
+		} else {
+			pr_err("scm call to generate key failed : %d\n", ret);
+			ret = -EFAULT;
+		}
+		goto generate_key_exit;
+	}
+
+	switch (resp.result) {
+	case QSEOS_RESULT_SUCCESS:
+		break;
+	case QSEOS_RESULT_FAIL_KEY_ID_EXISTS:
+		pr_debug("Key ID exists.\n");
+		break;
+	case QSEOS_RESULT_INCOMPLETE:
+		ret = __qseecom_process_incomplete_cmd(data, &resp);
+		if (ret) {
+			if (resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
+				pr_debug("Key ID exists.\n");
+				ret = 0;
+			} else {
+				pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
+					resp.result);
+			}
+		}
+		break;
+	case QSEOS_RESULT_FAILURE:
+	default:
+		pr_err("gen key scm call failed resp.result %d\n", resp.result);
+		ret = -EINVAL;
+		break;
+	}
+generate_key_exit:
+	__qseecom_disable_clk(CLK_QSEE);
+	return ret;
+}
+
+static int __qseecom_delete_saved_key(struct qseecom_dev_handle *data,
+			enum qseecom_key_management_usage_type usage,
+			struct qseecom_key_delete_ireq *ireq)
+{
+	struct qseecom_command_scm_resp resp;
+	int ret;
+
+	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
+		usage >= QSEOS_KM_USAGE_MAX) {
+		pr_err("Error: unsupported usage %d\n", usage);
+		return -EFAULT;
+	}
+	ret = __qseecom_enable_clk(CLK_QSEE);
+	if (ret)
+		return ret;
+
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+				ireq, sizeof(struct qseecom_key_delete_ireq),
+				&resp, sizeof(struct qseecom_command_scm_resp));
+	if (ret) {
+		if (ret == -EINVAL &&
+			resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
+			pr_debug("Max attempts to input password reached.\n");
+			ret = -ERANGE;
+		} else {
+			pr_err("scm call to delete key failed : %d\n", ret);
+			ret = -EFAULT;
+		}
+		goto del_key_exit;
+	}
+
+	switch (resp.result) {
+	case QSEOS_RESULT_SUCCESS:
+		break;
+	case QSEOS_RESULT_INCOMPLETE:
+		ret = __qseecom_process_incomplete_cmd(data, &resp);
+		if (ret) {
+			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
+					resp.result);
+			if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
+				pr_debug("Max attempts to input password reached.\n");
+				ret = -ERANGE;
+			}
+		}
+		break;
+	case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
+		pr_debug("Max attempts to input password reached.\n");
+		ret = -ERANGE;
+		break;
+	case QSEOS_RESULT_FAILURE:
+	default:
+		pr_err("Delete key scm call failed resp.result %d\n",
+							resp.result);
+		ret = -EINVAL;
+		break;
+	}
+del_key_exit:
+	__qseecom_disable_clk(CLK_QSEE);
+	return ret;
+}
+
+static int __qseecom_set_clear_ce_key(struct qseecom_dev_handle *data,
+			enum qseecom_key_management_usage_type usage,
+			struct qseecom_key_select_ireq *ireq)
+{
+	struct qseecom_command_scm_resp resp;
+	int ret;
+
+	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
+		usage >= QSEOS_KM_USAGE_MAX) {
+		pr_err("Error: unsupported usage %d\n", usage);
+		return -EFAULT;
+	}
+	ret = __qseecom_enable_clk(CLK_QSEE);
+	if (ret)
+		return ret;
+
+	if (qseecom.qsee.instance != qseecom.ce_drv.instance) {
+		ret = __qseecom_enable_clk(CLK_CE_DRV);
+		if (ret)
+			return ret;
+	}
+
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+				ireq, sizeof(struct qseecom_key_select_ireq),
+				&resp, sizeof(struct qseecom_command_scm_resp));
+	if (ret) {
+		if (ret == -EINVAL &&
+			resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
+			pr_debug("Max attempts to input password reached.\n");
+			ret = -ERANGE;
+		} else if (ret == -EINVAL &&
+			resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
+			pr_debug("Set Key operation under processing...\n");
+			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
+		} else {
+			pr_err("scm call to set QSEOS_PIPE_ENC key failed : %d\n",
+				ret);
+			ret = -EFAULT;
+		}
+		goto set_key_exit;
+	}
+
+	switch (resp.result) {
+	case QSEOS_RESULT_SUCCESS:
+		break;
+	case QSEOS_RESULT_INCOMPLETE:
+		ret = __qseecom_process_incomplete_cmd(data, &resp);
+		if (ret) {
+			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
+					resp.result);
+			if (resp.result ==
+				QSEOS_RESULT_FAIL_PENDING_OPERATION) {
+				pr_debug("Set Key operation under processing...\n");
+				ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
+			}
+			if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
+				pr_debug("Max attempts to input password reached.\n");
+				ret = -ERANGE;
+			}
+		}
+		break;
+	case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
+		pr_debug("Max attempts to input password reached.\n");
+		ret = -ERANGE;
+		break;
+	case QSEOS_RESULT_FAIL_PENDING_OPERATION:
+		pr_debug("Set Key operation under processing...\n");
+		ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
+		break;
+	case QSEOS_RESULT_FAILURE:
+	default:
+		pr_err("Set key scm call failed resp.result %d\n", resp.result);
+		ret = -EINVAL;
+		break;
+	}
+set_key_exit:
+	__qseecom_disable_clk(CLK_QSEE);
+	if (qseecom.qsee.instance != qseecom.ce_drv.instance)
+		__qseecom_disable_clk(CLK_CE_DRV);
+	return ret;
+}
+
+static int __qseecom_update_current_key_user_info(
+			struct qseecom_dev_handle *data,
+			enum qseecom_key_management_usage_type usage,
+			struct qseecom_key_userinfo_update_ireq *ireq)
+{
+	struct qseecom_command_scm_resp resp;
+	int ret;
+
+	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
+				usage >= QSEOS_KM_USAGE_MAX) {
+		pr_err("Error: unsupported usage %d\n", usage);
+		return -EFAULT;
+	}
+	ret = __qseecom_enable_clk(CLK_QSEE);
+	if (ret)
+		return ret;
+
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+		ireq, sizeof(struct qseecom_key_userinfo_update_ireq),
+		&resp, sizeof(struct qseecom_command_scm_resp));
+	if (ret) {
+		if (ret == -EINVAL &&
+			resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
+			pr_debug("Set Key operation under processing...\n");
+			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
+		} else {
+			pr_err("scm call to update key userinfo failed: %d\n",
+									ret);
+			__qseecom_disable_clk(CLK_QSEE);
+			return -EFAULT;
+		}
+	}
+
+	switch (resp.result) {
+	case QSEOS_RESULT_SUCCESS:
+		break;
+	case QSEOS_RESULT_INCOMPLETE:
+		ret = __qseecom_process_incomplete_cmd(data, &resp);
+		if (resp.result ==
+			QSEOS_RESULT_FAIL_PENDING_OPERATION) {
+			pr_debug("Set Key operation under processing...\n");
+			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
+		}
+		if (ret)
+			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
+					resp.result);
+		break;
+	case QSEOS_RESULT_FAIL_PENDING_OPERATION:
+		pr_debug("Update Key operation under processing...\n");
+		ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
+		break;
+	case QSEOS_RESULT_FAILURE:
+	default:
+		pr_err("Set key scm call failed resp.result %d\n", resp.result);
+		ret = -EINVAL;
+		break;
+	}
+
+	__qseecom_disable_clk(CLK_QSEE);
+	return ret;
+}
+
+static int qseecom_enable_ice_setup(int usage)
+{
+	int ret = 0;
+
+	if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
+		ret = qcom_ice_setup_ice_hw("ufs", true);
+	else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
+		ret = qcom_ice_setup_ice_hw("sdcc", true);
+
+	return ret;
+}
+
+static int qseecom_disable_ice_setup(int usage)
+{
+	int ret = 0;
+
+	if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
+		ret = qcom_ice_setup_ice_hw("ufs", false);
+	else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
+		ret = qcom_ice_setup_ice_hw("sdcc", false);
+
+	return ret;
+}
+
+static int qseecom_get_ce_hw_instance(uint32_t unit, uint32_t usage)
+{
+	struct qseecom_ce_info_use *pce_info_use, *p;
+	int total = 0;
+	int i;
+
+	switch (usage) {
+	case QSEOS_KM_USAGE_DISK_ENCRYPTION:
+	case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
+	case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
+		p = qseecom.ce_info.fde;
+		total = qseecom.ce_info.num_fde;
+		break;
+	case QSEOS_KM_USAGE_FILE_ENCRYPTION:
+		p = qseecom.ce_info.pfe;
+		total = qseecom.ce_info.num_pfe;
+		break;
+	default:
+		pr_err("unsupported usage %d\n", usage);
+		return -EINVAL;
+	}
+
+	pce_info_use = NULL;
+
+	for (i = 0; i < total; i++) {
+		if (p->unit_num == unit) {
+			pce_info_use = p;
+			break;
+		}
+		p++;
+	}
+	if (!pce_info_use) {
+		pr_err("cannot find unit %d\n", unit);
+		return -EINVAL;
+	}
+	return pce_info_use->num_ce_pipe_entries;
+}
+
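+/*
+ * Generate (or reuse) a storage encryption key for the requested usage and
+ * program it into every CE pipe of the default unit; UFS/SDCC ICE usages
+ * use the fixed ICE CE number and FDE key index instead.
+ */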
+static int qseecom_create_key(struct qseecom_dev_handle *data,
+			void __user *argp)
+{
+	int i;
+	uint32_t *ce_hw = NULL;
+	uint32_t pipe = 0;
+	int ret = 0;
+	uint32_t flags = 0;
+	struct qseecom_create_key_req create_key_req;
+	struct qseecom_key_generate_ireq generate_key_ireq;
+	struct qseecom_key_select_ireq set_key_ireq;
+	int entries = 0;
+
+	ret = copy_from_user(&create_key_req, argp, sizeof(create_key_req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+
+	if (create_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
+		create_key_req.usage >= QSEOS_KM_USAGE_MAX) {
+		pr_err("unsupported usage %d\n", create_key_req.usage);
+		ret = -EFAULT;
+		return ret;
+	}
+	entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
+					create_key_req.usage);
+	if (entries <= 0) {
+		pr_err("no ce instance for usage %d instance %d\n",
+			create_key_req.usage, DEFAULT_CE_INFO_UNIT);
+		ret = -EINVAL;
+		return ret;
+	}
+
+	ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
+	if (!ce_hw) {
+		ret = -ENOMEM;
+		return ret;
+	}
+	ret = __qseecom_get_ce_pipe_info(create_key_req.usage, &pipe, &ce_hw,
+			DEFAULT_CE_INFO_UNIT);
+	if (ret) {
+		pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
+		ret = -EINVAL;
+		goto free_buf;
+	}
+
+	if (qseecom.fde_key_size)
+		flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
+	else
+		flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
+
+	if (qseecom.enable_key_wrap_in_ks)
+		flags |= ENABLE_KEY_WRAP_IN_KS;
+
+	generate_key_ireq.flags = flags;
+	generate_key_ireq.qsee_command_id = QSEOS_GENERATE_KEY;
+	memset((void *)generate_key_ireq.key_id,
+			0, QSEECOM_KEY_ID_SIZE);
+	memset((void *)generate_key_ireq.hash32,
+			0, QSEECOM_HASH_SIZE);
+	memcpy((void *)generate_key_ireq.key_id,
+			(void *)key_id_array[create_key_req.usage].desc,
+			QSEECOM_KEY_ID_SIZE);
+	memcpy((void *)generate_key_ireq.hash32,
+			(void *)create_key_req.hash32,
+			QSEECOM_HASH_SIZE);
+
+	ret = __qseecom_generate_and_save_key(data,
+			create_key_req.usage, &generate_key_ireq);
+	if (ret) {
+		pr_err("Failed to generate key on storage: %d\n", ret);
+		goto free_buf;
+	}
+
+	for (i = 0; i < entries; i++) {
+		set_key_ireq.qsee_command_id = QSEOS_SET_KEY;
+		if (create_key_req.usage ==
+				QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
+			set_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
+			set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
+
+		} else if (create_key_req.usage ==
+				QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
+			set_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
+			set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
+
+		} else {
+			set_key_ireq.ce = ce_hw[i];
+			set_key_ireq.pipe = pipe;
+		}
+		set_key_ireq.flags = flags;
+
+		/* set both PIPE_ENC and PIPE_ENC_XTS*/
+		set_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
+		memset((void *)set_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
+		memset((void *)set_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
+		memcpy((void *)set_key_ireq.key_id,
+			(void *)key_id_array[create_key_req.usage].desc,
+			QSEECOM_KEY_ID_SIZE);
+		memcpy((void *)set_key_ireq.hash32,
+				(void *)create_key_req.hash32,
+				QSEECOM_HASH_SIZE);
+		/*
+		 * qseecom_enable_ice_setup() returns 0 if this is a GPCE based
+		 * crypto instance or if ICE is set up properly.
+		 */
+		ret = qseecom_enable_ice_setup(create_key_req.usage);
+		if (ret)
+			goto free_buf;
+
+		do {
+			ret = __qseecom_set_clear_ce_key(data,
+					create_key_req.usage,
+					&set_key_ireq);
+			/*
+			 * wait a little before calling scm again to let other
+			 * processes run
+			 */
+			if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
+				msleep(50);
+
+		} while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
+
+		qseecom_disable_ice_setup(create_key_req.usage);
+
+		if (ret) {
+			pr_err("Failed to create key: pipe %d, ce %d: %d\n",
+				pipe, ce_hw[i], ret);
+			goto free_buf;
+		} else {
+			pr_debug("Set the key successfully\n");
+			if ((create_key_req.usage ==
+				QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) ||
+			     (create_key_req.usage ==
+				QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION))
+				goto free_buf;
+		}
+	}
+
+free_buf:
+	kzfree(ce_hw);
+	return ret;
+}
+
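+/*
+ * Optionally delete the stored key (wipe_key_flag) and then clear it from
+ * every CE pipe of the default unit by programming the invalid key id.
+ */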
+static int qseecom_wipe_key(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	uint32_t *ce_hw = NULL;
+	uint32_t pipe = 0;
+	int ret = 0;
+	uint32_t flags = 0;
+	int i, j;
+	struct qseecom_wipe_key_req wipe_key_req;
+	struct qseecom_key_delete_ireq delete_key_ireq;
+	struct qseecom_key_select_ireq clear_key_ireq;
+	int entries = 0;
+
+	ret = copy_from_user(&wipe_key_req, argp, sizeof(wipe_key_req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+
+	if (wipe_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
+		wipe_key_req.usage >= QSEOS_KM_USAGE_MAX) {
+		pr_err("unsupported usage %d\n", wipe_key_req.usage);
+		ret = -EFAULT;
+		return ret;
+	}
+
+	entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
+					wipe_key_req.usage);
+	if (entries <= 0) {
+		pr_err("no ce instance for usage %d instance %d\n",
+			wipe_key_req.usage, DEFAULT_CE_INFO_UNIT);
+		ret = -EINVAL;
+		return ret;
+	}
+
+	ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
+	if (!ce_hw) {
+		ret = -ENOMEM;
+		return ret;
+	}
+
+	ret = __qseecom_get_ce_pipe_info(wipe_key_req.usage, &pipe, &ce_hw,
+				DEFAULT_CE_INFO_UNIT);
+	if (ret) {
+		pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
+		ret = -EINVAL;
+		goto free_buf;
+	}
+
+	if (wipe_key_req.wipe_key_flag) {
+		delete_key_ireq.flags = flags;
+		delete_key_ireq.qsee_command_id = QSEOS_DELETE_KEY;
+		memset((void *)delete_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
+		memcpy((void *)delete_key_ireq.key_id,
+			(void *)key_id_array[wipe_key_req.usage].desc,
+			QSEECOM_KEY_ID_SIZE);
+		memset((void *)delete_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
+
+		ret = __qseecom_delete_saved_key(data, wipe_key_req.usage,
+					&delete_key_ireq);
+		if (ret) {
+			pr_err("Failed to delete key from ssd storage: %d\n",
+				ret);
+			ret = -EFAULT;
+			goto free_buf;
+		}
+	}
+
+	for (j = 0; j < entries; j++) {
+		clear_key_ireq.qsee_command_id = QSEOS_SET_KEY;
+		if (wipe_key_req.usage ==
+				QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
+			clear_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
+			clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
+		} else if (wipe_key_req.usage ==
+			QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
+			clear_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
+			clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
+		} else {
+			clear_key_ireq.ce = ce_hw[j];
+			clear_key_ireq.pipe = pipe;
+		}
+		clear_key_ireq.flags = flags;
+		clear_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
+		for (i = 0; i < QSEECOM_KEY_ID_SIZE; i++)
+			clear_key_ireq.key_id[i] = QSEECOM_INVALID_KEY_ID;
+		memset((void *)clear_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
+
+		/*
+		 * qseecom_enable_ice_setup() returns 0 if this is a GPCE based
+		 * crypto instance or if ICE is set up properly.
+		 */
+		ret = qseecom_enable_ice_setup(wipe_key_req.usage);
+		if (ret)
+			goto free_buf;
+
+		ret = __qseecom_set_clear_ce_key(data, wipe_key_req.usage,
+					&clear_key_ireq);
+
+		qseecom_disable_ice_setup(wipe_key_req.usage);
+
+		if (ret) {
+			pr_err("Failed to wipe key: pipe %d, ce %d: %d\n",
+				pipe, ce_hw[j], ret);
+			ret = -EFAULT;
+			goto free_buf;
+		}
+	}
+
+free_buf:
+	kzfree(ce_hw);
+	return ret;
+}
+
+static int qseecom_update_key_user_info(struct qseecom_dev_handle *data,
+			void __user *argp)
+{
+	int ret = 0;
+	uint32_t flags = 0;
+	struct qseecom_update_key_userinfo_req update_key_req;
+	struct qseecom_key_userinfo_update_ireq ireq;
+
+	ret = copy_from_user(&update_key_req, argp, sizeof(update_key_req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+
+	if (update_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
+		update_key_req.usage >= QSEOS_KM_USAGE_MAX) {
+		pr_err("Error: unsupported usage %d\n", update_key_req.usage);
+		return -EFAULT;
+	}
+
+	ireq.qsee_command_id = QSEOS_UPDATE_KEY_USERINFO;
+
+	if (qseecom.fde_key_size)
+		flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
+	else
+		flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
+
+	ireq.flags = flags;
+	memset(ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
+	memset((void *)ireq.current_hash32, 0, QSEECOM_HASH_SIZE);
+	memset((void *)ireq.new_hash32, 0, QSEECOM_HASH_SIZE);
+	memcpy((void *)ireq.key_id,
+		(void *)key_id_array[update_key_req.usage].desc,
+		QSEECOM_KEY_ID_SIZE);
+	memcpy((void *)ireq.current_hash32,
+		(void *)update_key_req.current_hash32, QSEECOM_HASH_SIZE);
+	memcpy((void *)ireq.new_hash32,
+		(void *)update_key_req.new_hash32, QSEECOM_HASH_SIZE);
+
+	do {
+		ret = __qseecom_update_current_key_user_info(data,
+						update_key_req.usage,
+						&ireq);
+		/*
+		 * wait a little before calling scm again to let other
+		 * processes run
+		 */
+		if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
+			msleep(50);
+
+	} while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
+	if (ret) {
+		pr_err("Failed to update key info: %d\n", ret);
+		return ret;
+	}
+	return ret;
+}
+
+static int qseecom_is_es_activated(void __user *argp)
+{
+	struct qseecom_is_es_activated_req req = {0};
+	struct qseecom_command_scm_resp resp;
+	int ret;
+
+	if (qseecom.qsee_version < QSEE_VERSION_04) {
+		pr_err("invalid qsee version\n");
+		return -ENODEV;
+	}
+
+	if (argp == NULL) {
+		pr_err("arg is null\n");
+		return -EINVAL;
+	}
+
+	ret = qseecom_scm_call(SCM_SVC_ES, SCM_IS_ACTIVATED_ID,
+		&req, sizeof(req), &resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call failed\n");
+		return ret;
+	}
+
+	req.is_activated = resp.result;
+	ret = copy_to_user(argp, &req, sizeof(req));
+	if (ret) {
+		pr_err("copy_to_user failed\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int qseecom_save_partition_hash(void __user *argp)
+{
+	struct qseecom_save_partition_hash_req req;
+	struct qseecom_command_scm_resp resp;
+	int ret;
+
+	memset(&resp, 0x00, sizeof(resp));
+
+	if (qseecom.qsee_version < QSEE_VERSION_04) {
+		pr_err("invalid qsee version\n");
+		return -ENODEV;
+	}
+
+	if (argp == NULL) {
+		pr_err("arg is null\n");
+		return -EINVAL;
+	}
+
+	ret = copy_from_user(&req, argp, sizeof(req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+
+	ret = qseecom_scm_call(SCM_SVC_ES, SCM_SAVE_PARTITION_HASH_ID,
+		       (void *)&req, sizeof(req), (void *)&resp, sizeof(resp));
+	if (ret) {
+		pr_err("qseecom_scm_call failed\n");
+		return ret;
+	}
+
+	return 0;
+}
+
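+/*
+ * MDTP DIP cipher request: copy the caller's input into a kernel bounce
+ * buffer, invoke TZ_MDTP_CIPHER_DIP_ID via scm_call2() with the QSEE clock
+ * enabled, and copy the ciphered output back to userspace.
+ */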
+static int qseecom_mdtp_cipher_dip(void __user *argp)
+{
+	struct qseecom_mdtp_cipher_dip_req req;
+	u32 tzbuflenin, tzbuflenout;
+	char *tzbufin = NULL, *tzbufout = NULL;
+	struct scm_desc desc = {0};
+	int ret;
+
+	do {
+		/* Copy the parameters from userspace */
+		if (argp == NULL) {
+			pr_err("arg is null\n");
+			ret = -EINVAL;
+			break;
+		}
+
+		ret = copy_from_user(&req, argp, sizeof(req));
+		if (ret) {
+			pr_err("copy_from_user failed, ret= %d\n", ret);
+			break;
+		}
+
+		if (req.in_buf == NULL || req.out_buf == NULL ||
+			req.in_buf_size == 0 || req.in_buf_size > MAX_DIP ||
+			req.out_buf_size == 0 || req.out_buf_size > MAX_DIP ||
+				req.direction > 1) {
+			pr_err("invalid parameters\n");
+			ret = -EINVAL;
+			break;
+		}
+
+		/* Copy the input buffer from userspace to kernel space */
+		tzbuflenin = PAGE_ALIGN(req.in_buf_size);
+		tzbufin = kzalloc(tzbuflenin, GFP_KERNEL);
+		if (!tzbufin) {
+			pr_err("error allocating in buffer\n");
+			ret = -ENOMEM;
+			break;
+		}
+
+		ret = copy_from_user(tzbufin, req.in_buf, req.in_buf_size);
+		if (ret) {
+			pr_err("copy_from_user failed, ret=%d\n", ret);
+			break;
+		}
+
+		dmac_flush_range(tzbufin, tzbufin + tzbuflenin);
+
+		/* Prepare the output buffer in kernel space */
+		tzbuflenout = PAGE_ALIGN(req.out_buf_size);
+		tzbufout = kzalloc(tzbuflenout, GFP_KERNEL);
+		if (!tzbufout) {
+			pr_err("error allocating out buffer\n");
+			ret = -ENOMEM;
+			break;
+		}
+
+		dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
+
+		/* Send the command to TZ */
+		desc.arginfo = TZ_MDTP_CIPHER_DIP_ID_PARAM_ID;
+		desc.args[0] = virt_to_phys(tzbufin);
+		desc.args[1] = req.in_buf_size;
+		desc.args[2] = virt_to_phys(tzbufout);
+		desc.args[3] = req.out_buf_size;
+		desc.args[4] = req.direction;
+
+		ret = __qseecom_enable_clk(CLK_QSEE);
+		if (ret)
+			break;
+
+		ret = scm_call2(TZ_MDTP_CIPHER_DIP_ID, &desc);
+
+		__qseecom_disable_clk(CLK_QSEE);
+
+		if (ret) {
+			pr_err("scm_call2 failed for SCM_SVC_MDTP, ret=%d\n",
+				ret);
+			break;
+		}
+
+		/* Copy the output buffer from kernel space to userspace */
+		dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
+		ret = copy_to_user(req.out_buf, tzbufout, req.out_buf_size);
+		if (ret) {
+			pr_err("copy_to_user failed, ret=%d\n", ret);
+			break;
+		}
+	} while (0);
+
+	kzfree(tzbufin);
+	kzfree(tzbufout);
+
+	return ret;
+}
+
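+/*
+ * Sanity-check a QTEEC request: both request and response buffers must be
+ * non-empty, free of address/length overflow, and fully contained in the
+ * client's shared buffer.
+ */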
+static int __qseecom_qteec_validate_msg(struct qseecom_dev_handle *data,
+				struct qseecom_qteec_req *req)
+{
+	if (!data || !data->client.sb_virt) {
+		pr_err("Client or client buf is not initialized\n");
+		return -EINVAL;
+	}
+
+	if (data->type != QSEECOM_CLIENT_APP)
+		return -EFAULT;
+
+	if (req->req_len > UINT_MAX - req->resp_len) {
+		pr_err("Integer overflow detected in req_len & rsp_len\n");
+		return -EINVAL;
+	}
+
+	if (req->req_len + req->resp_len > data->client.sb_length) {
+		pr_debug("Not enough memory to fit cmd_buf.\n");
+		pr_debug("resp_buf. Required: %u, Available: %zu\n",
+		(req->req_len + req->resp_len), data->client.sb_length);
+		return -ENOMEM;
+	}
+
+	if (req->req_ptr == NULL || req->resp_ptr == NULL) {
+		pr_err("cmd buffer or response buffer is null\n");
+		return -EINVAL;
+	}
+	if (((uintptr_t)req->req_ptr <
+			data->client.user_virt_sb_base) ||
+		((uintptr_t)req->req_ptr >=
+		(data->client.user_virt_sb_base + data->client.sb_length))) {
+		pr_err("cmd buffer address not within shared buffer\n");
+		return -EINVAL;
+	}
+
+	if (((uintptr_t)req->resp_ptr <
+			data->client.user_virt_sb_base)  ||
+		((uintptr_t)req->resp_ptr >=
+		(data->client.user_virt_sb_base + data->client.sb_length))) {
+		pr_err("response buffer address not within shared buffer\n");
+		return -EINVAL;
+	}
+
+	if ((req->req_len == 0) || (req->resp_len == 0)) {
+		pr_err("cmd buf length/response buf length not valid\n");
+		return -EINVAL;
+	}
+
+	if ((uintptr_t)req->req_ptr > (ULONG_MAX - req->req_len)) {
+		pr_err("Integer overflow in req_len & req_ptr\n");
+		return -EINVAL;
+	}
+
+	if ((uintptr_t)req->resp_ptr > (ULONG_MAX - req->resp_len)) {
+		pr_err("Integer overflow in resp_len & resp_ptr\n");
+		return -EINVAL;
+	}
+
+	if (data->client.user_virt_sb_base >
+					(ULONG_MAX - data->client.sb_length)) {
+		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
+		return -EINVAL;
+	}
+	if ((((uintptr_t)req->req_ptr + req->req_len) >
+		((uintptr_t)data->client.user_virt_sb_base +
+						data->client.sb_length)) ||
+		(((uintptr_t)req->resp_ptr + req->resp_len) >
+		((uintptr_t)data->client.user_virt_sb_base +
+						data->client.sb_length))) {
+		pr_err("cmd buf or resp buf is out of shared buffer region\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int __qseecom_qteec_handle_pre_alc_fd(struct qseecom_dev_handle *data,
+				uint32_t fd_idx, struct sg_table *sg_ptr)
+{
+	struct scatterlist *sg = sg_ptr->sgl;
+	struct qseecom_sg_entry *sg_entry;
+	void *buf;
+	uint i;
+	size_t size;
+	dma_addr_t coh_pmem;
+
+	if (fd_idx >= MAX_ION_FD) {
+		pr_err("fd_idx [%d] is invalid\n", fd_idx);
+		return -ENOMEM;
+	}
+	/*
+	 * Allocate a buffer, populate it with the number of entries plus
+	 * each sg entry's physical address and length, and record the
+	 * buffer's physical address for the caller.
+	 */
+	size = sizeof(uint32_t) +
+		sizeof(struct qseecom_sg_entry) * sg_ptr->nents;
+	size = (size + PAGE_SIZE) & PAGE_MASK;
+	buf = dma_alloc_coherent(qseecom.dev,
+			size, &coh_pmem, GFP_KERNEL);
+	if (buf == NULL) {
+		pr_err("failed to alloc memory for sg buf\n");
+		return -ENOMEM;
+	}
+	*(uint32_t *)buf = sg_ptr->nents;
+	sg_entry = (struct qseecom_sg_entry *) (buf + sizeof(uint32_t));
+	for (i = 0; i < sg_ptr->nents; i++) {
+		sg_entry->phys_addr = (uint32_t)sg_dma_address(sg);
+		sg_entry->len = sg->length;
+		sg_entry++;
+		sg = sg_next(sg);
+	}
+	data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
+	data->client.sec_buf_fd[fd_idx].vbase = buf;
+	data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
+	data->client.sec_buf_fd[fd_idx].size = size;
+	return 0;
+}
+
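+/*
+ * Patch (or, on cleanup, restore) the memref entries of a QTEEC modfd
+ * request: for each ion fd, map the dma-buf and write either the physical
+ * address of a freshly allocated sg-list buffer (pre-allocated secure fds)
+ * or the single sg entry's dma address into the command buffer, performing
+ * the matching cache maintenance.
+ */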
+static int __qseecom_update_qteec_req_buf(struct qseecom_qteec_modfd_req *req,
+			struct qseecom_dev_handle *data, bool cleanup)
+{
+	int ret = 0;
+	int i = 0;
+	uint32_t *update;
+	struct sg_table *sg_ptr = NULL;
+	struct scatterlist *sg;
+	struct qseecom_param_memref *memref;
+	int ion_fd = -1;
+	struct dma_buf *dmabuf = NULL;
+	struct dma_buf_attachment *attach = NULL;
+
+	if (req == NULL) {
+		pr_err("Invalid address\n");
+		return -EINVAL;
+	}
+	for (i = 0; i < MAX_ION_FD; i++) {
+		if (req->ifd_data[i].fd > 0) {
+			ion_fd = req->ifd_data[i].fd;
+			if ((req->req_len < sizeof(uint32_t)) ||
+				(req->ifd_data[i].cmd_buf_offset >
+				req->req_len - sizeof(uint32_t))) {
+				pr_err("Invalid offset/req len 0x%x/0x%x\n",
+					req->req_len,
+					req->ifd_data[i].cmd_buf_offset);
+				return -EINVAL;
+			}
+			update = (uint32_t *)((char *) req->req_ptr +
+				req->ifd_data[i].cmd_buf_offset);
+			if (!update) {
+				pr_err("update pointer is NULL\n");
+				return -EINVAL;
+			}
+		} else {
+			continue;
+		}
+		/* Populate the cmd data structure with the phys_addr */
+		ret = qseecom_dmabuf_map(ion_fd, &sg_ptr, &attach, &dmabuf);
+		if (ret) {
+			pr_err("Ion client could not retrieve sg table\n");
+			goto err;
+		}
+		sg = sg_ptr->sgl;
+		if (sg == NULL) {
+			pr_err("sg is NULL\n");
+			goto err;
+		}
+		if ((sg_ptr->nents == 0) || (sg->length == 0)) {
+			pr_err("Num of scatter entries (%d) or length (%d) invalid\n",
+					sg_ptr->nents, sg->length);
+			goto err;
+		}
+		/* clean up buf for pre-allocated fd */
+		if (cleanup && data->client.sec_buf_fd[i].is_sec_buf_fd &&
+			(*update)) {
+			if (data->client.sec_buf_fd[i].vbase)
+				dma_free_coherent(qseecom.dev,
+					data->client.sec_buf_fd[i].size,
+					data->client.sec_buf_fd[i].vbase,
+					data->client.sec_buf_fd[i].pbase);
+			memset((void *)update, 0,
+				sizeof(struct qseecom_param_memref));
+			memset(&(data->client.sec_buf_fd[i]), 0,
+				sizeof(struct qseecom_sec_buf_fd_info));
+			goto clean;
+		}
+
+		if (*update == 0) {
+			/* update buf for pre-allocated fd from secure heap*/
+			ret = __qseecom_qteec_handle_pre_alc_fd(data, i,
+				sg_ptr);
+			if (ret) {
+				pr_err("Failed to handle buf for fd[%d]\n", i);
+				goto err;
+			}
+			memref = (struct qseecom_param_memref *)update;
+			memref->buffer =
+				(uint32_t)(data->client.sec_buf_fd[i].pbase);
+			memref->size =
+				(uint32_t)(data->client.sec_buf_fd[i].size);
+		} else {
+			/* update buf for fd from non-secure qseecom heap */
+			if (sg_ptr->nents != 1) {
+				pr_err("Num of scatter entries (%d) invalid\n",
+					sg_ptr->nents);
+				goto err;
+			}
+			if (cleanup)
+				*update = 0;
+			else
+				*update = (uint32_t)sg_dma_address(sg_ptr->sgl);
+		}
+clean:
+		if (cleanup) {
+			ret = qseecom_dmabuf_cache_operations(dmabuf,
+					QSEECOM_CACHE_INVALIDATE);
+			if (ret) {
+				pr_err("cache operation failed %d\n", ret);
+				goto err;
+			}
+		} else {
+			ret = qseecom_dmabuf_cache_operations(dmabuf,
+					QSEECOM_CACHE_CLEAN);
+			if (ret) {
+				pr_err("cache operation failed %d\n", ret);
+				goto err;
+			}
+			data->sglistinfo_ptr[i].indexAndFlags =
+				SGLISTINFO_SET_INDEX_FLAG(
+				(sg_ptr->nents == 1), 0,
+				req->ifd_data[i].cmd_buf_offset);
+			data->sglistinfo_ptr[i].sizeOrCount =
+				(sg_ptr->nents == 1) ?
+				sg->length : sg_ptr->nents;
+			data->sglist_cnt = i + 1;
+		}
+		/* unmap the dmabuf */
+		qseecom_dmabuf_unmap(sg_ptr, attach, dmabuf);
+		sg_ptr = NULL;
+		dmabuf = NULL;
+		attach = NULL;
+	}
+	return ret;
+err:
+	if (!IS_ERR_OR_NULL(sg_ptr))
+		qseecom_dmabuf_unmap(sg_ptr, attach, dmabuf);
+	return -ENOMEM;
+}
+
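+/*
+ * Common QTEEC command path: validate the request, translate user virtual
+ * addresses to kernel/physical ones, build the 32- or 64-bit ireq including
+ * the sg-list table, and issue the SCM call, handling reentrancy and
+ * incomplete responses.
+ */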
+static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data,
+				struct qseecom_qteec_req *req, uint32_t cmd_id)
+{
+	struct qseecom_command_scm_resp resp;
+	struct qseecom_qteec_ireq ireq;
+	struct qseecom_qteec_64bit_ireq ireq_64bit;
+	struct qseecom_registered_app_list *ptr_app;
+	bool found_app = false;
+	unsigned long flags;
+	int ret = 0;
+	int ret2 = 0;
+	uint32_t reqd_len_sb_in = 0;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+	struct sglist_info *table = data->sglistinfo_ptr;
+	void *req_ptr = NULL;
+	void *resp_ptr = NULL;
+
+	ret = __qseecom_qteec_validate_msg(data, req);
+	if (ret)
+		return ret;
+
+	req_ptr = req->req_ptr;
+	resp_ptr = req->resp_ptr;
+
+	/* find app_id & img_name from list */
+	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
+							list) {
+		if ((ptr_app->app_id == data->client.app_id) &&
+			 (!strcmp(ptr_app->app_name, data->client.app_name))) {
+			found_app = true;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
+	if (!found_app) {
+		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
+			(char *)data->client.app_name);
+		return -ENOENT;
+	}
+
+	req->req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
+						(uintptr_t)req->req_ptr);
+	req->resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
+						(uintptr_t)req->resp_ptr);
+
+	if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
+			(cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
+		ret = __qseecom_update_qteec_req_buf(
+			(struct qseecom_qteec_modfd_req *)req, data, false);
+		if (ret)
+			return ret;
+	}
+
+	if (qseecom.qsee_version < QSEE_VERSION_40) {
+		ireq.app_id = data->client.app_id;
+		ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
+						(uintptr_t)req_ptr);
+		ireq.req_len = req->req_len;
+		ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
+						(uintptr_t)resp_ptr);
+		ireq.resp_len = req->resp_len;
+		ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
+		ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
+		dmac_flush_range((void *)table,
+				(void *)table + SGLISTINFO_TABLE_SIZE);
+		cmd_buf = (void *)&ireq;
+		cmd_len = sizeof(struct qseecom_qteec_ireq);
+	} else {
+		ireq_64bit.app_id = data->client.app_id;
+		ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
+						(uintptr_t)req_ptr);
+		ireq_64bit.req_len = req->req_len;
+		ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
+						(uintptr_t)resp_ptr);
+		ireq_64bit.resp_len = req->resp_len;
+		if ((data->client.app_arch == ELFCLASS32) &&
+			((ireq_64bit.req_ptr >=
+				PHY_ADDR_4G - ireq_64bit.req_len) ||
+			(ireq_64bit.resp_ptr >=
+				PHY_ADDR_4G - ireq_64bit.resp_len))){
+			pr_err("32bit app %s (id: %d): phy_addr exceeds 4G\n",
+				data->client.app_name, data->client.app_id);
+			pr_err("req_ptr:%llx,req_len:%x,rsp_ptr:%llx,rsp_len:%x\n",
+				ireq_64bit.req_ptr, ireq_64bit.req_len,
+				ireq_64bit.resp_ptr, ireq_64bit.resp_len);
+			return -EFAULT;
+		}
+		ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
+		ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
+		dmac_flush_range((void *)table,
+				(void *)table + SGLISTINFO_TABLE_SIZE);
+		cmd_buf = (void *)&ireq_64bit;
+		cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
+	}
+	if (qseecom.whitelist_support
+		&& cmd_id == QSEOS_TEE_OPEN_SESSION)
+		*(uint32_t *)cmd_buf = QSEOS_TEE_OPEN_SESSION_WHITELIST;
+	else
+		*(uint32_t *)cmd_buf = cmd_id;
+
+	reqd_len_sb_in = req->req_len + req->resp_len;
+	ret = qseecom_dmabuf_cache_operations(data->client.dmabuf,
+				QSEECOM_CACHE_CLEAN);
+	if (ret) {
+		pr_err("cache operation failed %d\n", ret);
+		return ret;
+	}
+
+	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
+
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+				cmd_buf, cmd_len,
+				&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
+					ret, data->client.app_id);
+		goto exit;
+	}
+	ret = qseecom_dmabuf_cache_operations(data->client.dmabuf,
+					QSEECOM_CACHE_INVALIDATE);
+	if (ret) {
+		pr_err("cache operation failed %d\n", ret);
+		return ret;
+	}
+
+	if (qseecom.qsee_reentrancy_support) {
+		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
+		if (ret)
+			goto exit;
+	} else {
+		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
+			ret = __qseecom_process_incomplete_cmd(data, &resp);
+			if (ret) {
+				pr_err("process_incomplete_cmd failed err: %d\n",
+						ret);
+				goto exit;
+			}
+		} else {
+			if (resp.result != QSEOS_RESULT_SUCCESS) {
+				pr_err("Response result %d not supported\n",
+								resp.result);
+				ret = -EINVAL;
+				goto exit;
+			}
+		}
+	}
+exit:
+	if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
+			(cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
+		ret2 = __qseecom_update_qteec_req_buf(
+			(struct qseecom_qteec_modfd_req *)req, data, true);
+		if (ret2)
+			return ret2;
+	}
+	return ret;
+}
+
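+/*
+ * The open-session, close-session and request-cancellation handlers below
+ * are thin wrappers: copy the request from user space and hand it to
+ * __qseecom_qteec_issue_cmd() with the matching QSEOS TEE command id.
+ */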
+static int qseecom_qteec_open_session(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	struct qseecom_qteec_modfd_req req;
+	int ret = 0;
+
+	ret = copy_from_user(&req, argp,
+				sizeof(struct qseecom_qteec_modfd_req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+	ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
+							QSEOS_TEE_OPEN_SESSION);
+
+	return ret;
+}
+
+static int qseecom_qteec_close_session(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	struct qseecom_qteec_req req;
+	int ret = 0;
+
+	ret = copy_from_user(&req, argp, sizeof(struct qseecom_qteec_req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+	ret = __qseecom_qteec_issue_cmd(data, &req, QSEOS_TEE_CLOSE_SESSION);
+	return ret;
+}
+
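+/*
+ * GP invoke-command handler.  Largely mirrors __qseecom_qteec_issue_cmd():
+ * validate the request and the fd buffer offsets, patch the fd-backed
+ * buffers into the command buffer, issue the SCM call (using the whitelist
+ * variant of the command id when supported) and restore the buffers once
+ * the command completes.
+ */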
+static int qseecom_qteec_invoke_modfd_cmd(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	struct qseecom_qteec_modfd_req req;
+	struct qseecom_command_scm_resp resp;
+	struct qseecom_qteec_ireq ireq;
+	struct qseecom_qteec_64bit_ireq ireq_64bit;
+	struct qseecom_registered_app_list *ptr_app;
+	bool found_app = false;
+	unsigned long flags;
+	int ret = 0;
+	int i = 0;
+	uint32_t reqd_len_sb_in = 0;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+	struct sglist_info *table = data->sglistinfo_ptr;
+	void *req_ptr = NULL;
+	void *resp_ptr = NULL;
+
+	ret = copy_from_user(&req, argp,
+			sizeof(struct qseecom_qteec_modfd_req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+	ret = __qseecom_qteec_validate_msg(data,
+					(struct qseecom_qteec_req *)(&req));
+	if (ret)
+		return ret;
+	req_ptr = req.req_ptr;
+	resp_ptr = req.resp_ptr;
+
+	/* find app_id & img_name from list */
+	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
+							list) {
+		if ((ptr_app->app_id == data->client.app_id) &&
+			 (!strcmp(ptr_app->app_name, data->client.app_name))) {
+			found_app = true;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
+	if (!found_app) {
+		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
+			(char *)data->client.app_name);
+		return -ENOENT;
+	}
+
+	/* validate offsets */
+	for (i = 0; i < MAX_ION_FD; i++) {
+		if (req.ifd_data[i].fd) {
+			if (req.ifd_data[i].cmd_buf_offset >= req.req_len)
+				return -EINVAL;
+		}
+	}
+	req.req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
+						(uintptr_t)req.req_ptr);
+	req.resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
+						(uintptr_t)req.resp_ptr);
+	ret = __qseecom_update_qteec_req_buf(&req, data, false);
+	if (ret)
+		return ret;
+
+	if (qseecom.qsee_version < QSEE_VERSION_40) {
+		ireq.app_id = data->client.app_id;
+		ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
+						(uintptr_t)req_ptr);
+		ireq.req_len = req.req_len;
+		ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
+						(uintptr_t)resp_ptr);
+		ireq.resp_len = req.resp_len;
+		cmd_buf = (void *)&ireq;
+		cmd_len = sizeof(struct qseecom_qteec_ireq);
+		ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
+		ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
+		dmac_flush_range((void *)table,
+				(void *)table + SGLISTINFO_TABLE_SIZE);
+	} else {
+		ireq_64bit.app_id = data->client.app_id;
+		ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
+						(uintptr_t)req_ptr);
+		ireq_64bit.req_len = req.req_len;
+		ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
+						(uintptr_t)resp_ptr);
+		ireq_64bit.resp_len = req.resp_len;
+		cmd_buf = (void *)&ireq_64bit;
+		cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
+		ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
+		ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
+		dmac_flush_range((void *)table,
+				(void *)table + SGLISTINFO_TABLE_SIZE);
+	}
+	reqd_len_sb_in = req.req_len + req.resp_len;
+	if (qseecom.whitelist_support)
+		*(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND_WHITELIST;
+	else
+		*(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND;
+
+	ret = qseecom_dmabuf_cache_operations(data->client.dmabuf,
+					QSEECOM_CACHE_CLEAN);
+	if (ret) {
+		pr_err("cache operation failed %d\n", ret);
+		return ret;
+	}
+
+	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
+
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+				cmd_buf, cmd_len,
+				&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
+					ret, data->client.app_id);
+		return ret;
+	}
+	ret = qseecom_dmabuf_cache_operations(data->client.dmabuf,
+					QSEECOM_CACHE_INVALIDATE);
+	if (ret) {
+		pr_err("cache operation failed %d\n", ret);
+		return ret;
+	}
+
+	if (qseecom.qsee_reentrancy_support) {
+		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
+	} else {
+		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
+			ret = __qseecom_process_incomplete_cmd(data, &resp);
+			if (ret) {
+				pr_err("process_incomplete_cmd failed err: %d\n",
+						ret);
+				return ret;
+			}
+		} else {
+			if (resp.result != QSEOS_RESULT_SUCCESS) {
+				pr_err("Response result %d not supported\n",
+								resp.result);
+				ret = -EINVAL;
+			}
+		}
+	}
+	ret = __qseecom_update_qteec_req_buf(&req, data, true);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int qseecom_qteec_request_cancellation(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	struct qseecom_qteec_modfd_req req;
+	int ret = 0;
+
+	ret = copy_from_user(&req, argp,
+				sizeof(struct qseecom_qteec_modfd_req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+	ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
+						QSEOS_TEE_REQUEST_CANCELLATION);
+
+	return ret;
+}
+
+static void __qseecom_clean_data_sglistinfo(struct qseecom_dev_handle *data)
+{
+	if (data->sglist_cnt) {
+		memset(data->sglistinfo_ptr, 0,
+			SGLISTINFO_TABLE_SIZE);
+		data->sglist_cnt = 0;
+	}
+}
+
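+/*
+ * Main ioctl dispatcher for the qseecom character device.  Each command
+ * first checks that the handle type matches what the command expects, then
+ * takes the listener or app access lock as appropriate and bumps
+ * ioctl_count so a concurrent abort can wait for in-flight requests to
+ * drain.
+ */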
+static inline long qseecom_ioctl(struct file *file,
+			unsigned int cmd, unsigned long arg)
+{
+	int ret = 0;
+	struct qseecom_dev_handle *data = file->private_data;
+	void __user *argp = (void __user *) arg;
+	bool perf_enabled = false;
+
+	if (!data) {
+		pr_err("Invalid/uninitialized device handle\n");
+		return -EINVAL;
+	}
+
+	if (data->abort) {
+		pr_err("Aborting qseecom driver\n");
+		return -ENODEV;
+	}
+	if (cmd != QSEECOM_IOCTL_RECEIVE_REQ &&
+		cmd != QSEECOM_IOCTL_SEND_RESP_REQ &&
+		cmd != QSEECOM_IOCTL_SEND_MODFD_RESP &&
+		cmd != QSEECOM_IOCTL_SEND_MODFD_RESP_64)
+		__qseecom_processing_pending_lsnr_unregister();
+
+	switch (cmd) {
+	case QSEECOM_IOCTL_REGISTER_LISTENER_REQ: {
+		if (data->type != QSEECOM_GENERIC) {
+			pr_err("reg lstnr req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		pr_debug("ioctl register_listener_req()\n");
+		mutex_lock(&listener_access_lock);
+		atomic_inc(&data->ioctl_count);
+		data->type = QSEECOM_LISTENER_SERVICE;
+		ret = qseecom_register_listener(data, argp);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		mutex_unlock(&listener_access_lock);
+		if (ret)
+			pr_err("failed qseecom_register_listener: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ: {
+		if ((data->listener.id == 0) ||
+			(data->type != QSEECOM_LISTENER_SERVICE)) {
+			pr_err("unreg lstnr req: invalid handle (%d) lid(%d)\n",
+						data->type, data->listener.id);
+			ret = -EINVAL;
+			break;
+		}
+		pr_debug("ioctl unregister_listener_req()\n");
+		mutex_lock(&listener_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_unregister_listener(data);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		mutex_unlock(&listener_access_lock);
+		if (ret)
+			pr_err("failed qseecom_unregister_listener: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_SEND_CMD_REQ: {
+		if ((data->client.app_id == 0) ||
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("send cmd req: invalid handle (%d) app_id(%d)\n",
+					data->type, data->client.app_id);
+			ret = -EINVAL;
+			break;
+		}
+		/* Only one client allowed here at a time */
+		mutex_lock(&app_access_lock);
+		if (qseecom.support_bus_scaling) {
+			/* register bus bw in case the client doesn't do it */
+			if (!data->mode) {
+				mutex_lock(&qsee_bw_mutex);
+				__qseecom_register_bus_bandwidth_needs(
+								data, HIGH);
+				mutex_unlock(&qsee_bw_mutex);
+			}
+			ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
+			if (ret) {
+				pr_err("Failed to set bw.\n");
+				ret = -EINVAL;
+				mutex_unlock(&app_access_lock);
+				break;
+			}
+		}
+		/*
+		 * On targets where crypto clock is handled by HLOS,
+		 * if clk_access_cnt is zero and perf_enabled is false,
+		 * then the crypto clock was not enabled before sending cmd to
+		 * tz, qseecom will enable the clock to avoid service failure.
+		 */
+		if (!qseecom.no_clock_support &&
+			!qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
+			pr_debug("ce clock is not enabled!\n");
+			ret = qseecom_perf_enable(data);
+			if (ret) {
+				pr_err("Failed to vote for clock with err %d\n",
+						ret);
+				mutex_unlock(&app_access_lock);
+				ret = -EINVAL;
+				break;
+			}
+			perf_enabled = true;
+		}
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_send_cmd(data, argp);
+		if (qseecom.support_bus_scaling)
+			__qseecom_add_bw_scale_down_timer(
+				QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
+		if (perf_enabled) {
+			qsee_disable_clock_vote(data, CLK_DFAB);
+			qsee_disable_clock_vote(data, CLK_SFPB);
+		}
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed qseecom_send_cmd: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_SEND_MODFD_CMD_REQ:
+	case QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ: {
+		if ((data->client.app_id == 0) ||
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("send mdfd cmd: invalid handle (%d) appid(%d)\n",
+					data->type, data->client.app_id);
+			ret = -EINVAL;
+			break;
+		}
+		/* Only one client allowed here at a time */
+		mutex_lock(&app_access_lock);
+		if (qseecom.support_bus_scaling) {
+			if (!data->mode) {
+				mutex_lock(&qsee_bw_mutex);
+				__qseecom_register_bus_bandwidth_needs(
+								data, HIGH);
+				mutex_unlock(&qsee_bw_mutex);
+			}
+			ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
+			if (ret) {
+				pr_err("Failed to set bw.\n");
+				mutex_unlock(&app_access_lock);
+				ret = -EINVAL;
+				break;
+			}
+		}
+		/*
+		 * On targets where crypto clock is handled by HLOS,
+		 * if clk_access_cnt is zero and perf_enabled is false,
+		 * then the crypto clock was not enabled before sending cmd to
+		 * tz, qseecom will enable the clock to avoid service failure.
+		 */
+		if (!qseecom.no_clock_support &&
+			!qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
+			pr_debug("ce clock is not enabled!\n");
+			ret = qseecom_perf_enable(data);
+			if (ret) {
+				pr_err("Failed to vote for clock with err %d\n",
+						ret);
+				mutex_unlock(&app_access_lock);
+				ret = -EINVAL;
+				break;
+			}
+			perf_enabled = true;
+		}
+		atomic_inc(&data->ioctl_count);
+		if (cmd == QSEECOM_IOCTL_SEND_MODFD_CMD_REQ)
+			ret = qseecom_send_modfd_cmd(data, argp);
+		else
+			ret = qseecom_send_modfd_cmd_64(data, argp);
+		if (qseecom.support_bus_scaling)
+			__qseecom_add_bw_scale_down_timer(
+				QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
+		if (perf_enabled) {
+			qsee_disable_clock_vote(data, CLK_DFAB);
+			qsee_disable_clock_vote(data, CLK_SFPB);
+		}
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed qseecom_send_modfd_cmd: %d\n", ret);
+		__qseecom_clean_data_sglistinfo(data);
+		break;
+	}
+	case QSEECOM_IOCTL_RECEIVE_REQ: {
+		if ((data->listener.id == 0) ||
+			(data->type != QSEECOM_LISTENER_SERVICE)) {
+			pr_err("receive req: invalid handle (%d), lid(%d)\n",
+						data->type, data->listener.id);
+			ret = -EINVAL;
+			break;
+		}
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_receive_req(data);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		if (ret && (ret != -ERESTARTSYS))
+			pr_err("failed qseecom_receive_req: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_SEND_RESP_REQ: {
+		if ((data->listener.id == 0) ||
+			(data->type != QSEECOM_LISTENER_SERVICE)) {
+			pr_err("send resp req: invalid handle (%d), lid(%d)\n",
+						data->type, data->listener.id);
+			ret = -EINVAL;
+			break;
+		}
+		mutex_lock(&listener_access_lock);
+		atomic_inc(&data->ioctl_count);
+		if (!qseecom.qsee_reentrancy_support)
+			ret = qseecom_send_resp();
+		else
+			ret = qseecom_reentrancy_send_resp(data);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		mutex_unlock(&listener_access_lock);
+		if (ret)
+			pr_err("failed qseecom_send_resp: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_SET_MEM_PARAM_REQ: {
+		if ((data->type != QSEECOM_CLIENT_APP) &&
+			(data->type != QSEECOM_GENERIC) &&
+			(data->type != QSEECOM_SECURE_SERVICE)) {
+			pr_err("set mem param req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		pr_debug("SET_MEM_PARAM: qseecom addr = 0x%pK\n", data);
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_set_client_mem_param(data, argp);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed qseecom_set_mem_param request: %d\n",
+								ret);
+		break;
+	}
+	case QSEECOM_IOCTL_LOAD_APP_REQ: {
+		if ((data->type != QSEECOM_GENERIC) &&
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("load app req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		data->type = QSEECOM_CLIENT_APP;
+		pr_debug("LOAD_APP_REQ: qseecom_addr = 0x%pK\n", data);
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_load_app(data, argp);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed load_app request: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_UNLOAD_APP_REQ: {
+		if ((data->client.app_id == 0) ||
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("unload app req:invalid handle(%d) app_id(%d)\n",
+					data->type, data->client.app_id);
+			ret = -EINVAL;
+			break;
+		}
+		pr_debug("UNLOAD_APP: qseecom_addr = 0x%pK\n", data);
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_unload_app(data, false);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed unload_app request: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: {
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_get_qseos_version(data, argp);
+		if (ret)
+			pr_err("qseecom_get_qseos_version: %d\n", ret);
+		atomic_dec(&data->ioctl_count);
+		break;
+	}
+	case QSEECOM_IOCTL_PERF_ENABLE_REQ:{
+		if ((data->type != QSEECOM_GENERIC) &&
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("perf enable req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		if ((data->type == QSEECOM_CLIENT_APP) &&
+			(data->client.app_id == 0)) {
+			pr_err("perf enable req:invalid handle(%d) appid(%d)\n",
+					data->type, data->client.app_id);
+			ret = -EINVAL;
+			break;
+		}
+		atomic_inc(&data->ioctl_count);
+		if (qseecom.support_bus_scaling) {
+			mutex_lock(&qsee_bw_mutex);
+			__qseecom_register_bus_bandwidth_needs(data, HIGH);
+			mutex_unlock(&qsee_bw_mutex);
+		} else {
+			ret = qseecom_perf_enable(data);
+			if (ret)
+				pr_err("Fail to vote for clocks %d\n", ret);
+		}
+		atomic_dec(&data->ioctl_count);
+		break;
+	}
+	case QSEECOM_IOCTL_PERF_DISABLE_REQ:{
+		if ((data->type != QSEECOM_SECURE_SERVICE) &&
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("perf disable req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		if ((data->type == QSEECOM_CLIENT_APP) &&
+			(data->client.app_id == 0)) {
+			pr_err("perf disable: invalid handle (%d)app_id(%d)\n",
+					data->type, data->client.app_id);
+			ret = -EINVAL;
+			break;
+		}
+		atomic_inc(&data->ioctl_count);
+		if (!qseecom.support_bus_scaling) {
+			qsee_disable_clock_vote(data, CLK_DFAB);
+			qsee_disable_clock_vote(data, CLK_SFPB);
+		} else {
+			mutex_lock(&qsee_bw_mutex);
+			qseecom_unregister_bus_bandwidth_needs(data);
+			mutex_unlock(&qsee_bw_mutex);
+		}
+		atomic_dec(&data->ioctl_count);
+		break;
+	}
+
+	case QSEECOM_IOCTL_SET_BUS_SCALING_REQ: {
+		/* If crypto clock is not handled by HLOS, return directly. */
+		if (qseecom.no_clock_support) {
+			pr_debug("crypto clock is not handled by HLOS\n");
+			break;
+		}
+		if ((data->client.app_id == 0) ||
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("set bus scale: invalid handle (%d) appid(%d)\n",
+					data->type, data->client.app_id);
+			ret = -EINVAL;
+			break;
+		}
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_scale_bus_bandwidth(data, argp);
+		atomic_dec(&data->ioctl_count);
+		break;
+	}
+	case QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: {
+		if (data->type != QSEECOM_GENERIC) {
+			pr_err("load ext elf req: invalid client handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
+		data->released = true;
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_load_external_elf(data, argp);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed load_external_elf request: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: {
+		if (data->type != QSEECOM_UNAVAILABLE_CLIENT_APP) {
+			pr_err("unload ext elf req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		data->released = true;
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_unload_external_elf(data);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed unload_external_elf request: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: {
+		data->type = QSEECOM_CLIENT_APP;
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		pr_debug("APP_LOAD_QUERY: qseecom_addr = 0x%pK\n", data);
+		ret = qseecom_query_app_loaded(data, argp);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		break;
+	}
+	case QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: {
+		if (data->type != QSEECOM_GENERIC) {
+			pr_err("send cmd svc req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		data->type = QSEECOM_SECURE_SERVICE;
+		if (qseecom.qsee_version < QSEE_VERSION_03) {
+			pr_err("SEND_CMD_SERVICE_REQ: Invalid qsee ver %u\n",
+				qseecom.qsee_version);
+			return -EINVAL;
+		}
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_send_service_cmd(data, argp);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		break;
+	}
+	case QSEECOM_IOCTL_CREATE_KEY_REQ: {
+		if (!(qseecom.support_pfe || qseecom.support_fde))
+			pr_err("Features requiring key init not supported\n");
+		if (data->type != QSEECOM_GENERIC) {
+			pr_err("create key req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		if (qseecom.qsee_version < QSEE_VERSION_05) {
+			pr_err("Create Key feature unsupported: qsee ver %u\n",
+				qseecom.qsee_version);
+			return -EINVAL;
+		}
+		data->released = true;
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_create_key(data, argp);
+		if (ret)
+			pr_err("failed to create encryption key: %d\n", ret);
+
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		break;
+	}
+	case QSEECOM_IOCTL_WIPE_KEY_REQ: {
+		if (!(qseecom.support_pfe || qseecom.support_fde))
+			pr_err("Features requiring key init not supported\n");
+		if (data->type != QSEECOM_GENERIC) {
+			pr_err("wipe key req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		if (qseecom.qsee_version < QSEE_VERSION_05) {
+			pr_err("Wipe Key feature unsupported in qsee ver %u\n",
+				qseecom.qsee_version);
+			return -EINVAL;
+		}
+		data->released = true;
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_wipe_key(data, argp);
+		if (ret)
+			pr_err("failed to wipe encryption key: %d\n", ret);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		break;
+	}
+	case QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: {
+		if (!(qseecom.support_pfe || qseecom.support_fde))
+			pr_err("Features requiring key init not supported\n");
+		if (data->type != QSEECOM_GENERIC) {
+			pr_err("update key req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		if (qseecom.qsee_version < QSEE_VERSION_05) {
+			pr_err("Update Key feature unsupported in qsee ver %u\n",
+				qseecom.qsee_version);
+			return -EINVAL;
+		}
+		data->released = true;
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_update_key_user_info(data, argp);
+		if (ret)
+			pr_err("failed to update key user info: %d\n", ret);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		break;
+	}
+	case QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: {
+		if (data->type != QSEECOM_GENERIC) {
+			pr_err("save part hash req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		data->released = true;
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_save_partition_hash(argp);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		break;
+	}
+	case QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: {
+		if (data->type != QSEECOM_GENERIC) {
+			pr_err("ES activated req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		data->released = true;
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_is_es_activated(argp);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		break;
+	}
+	case QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: {
+		if (data->type != QSEECOM_GENERIC) {
+			pr_err("MDTP cipher DIP req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		data->released = true;
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_mdtp_cipher_dip(argp);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		break;
+	}
+	case QSEECOM_IOCTL_SEND_MODFD_RESP:
+	case QSEECOM_IOCTL_SEND_MODFD_RESP_64: {
+		if ((data->listener.id == 0) ||
+			(data->type != QSEECOM_LISTENER_SERVICE)) {
+			pr_err("send modfd resp: invalid handle (%d), lid(%d)\n",
+						data->type, data->listener.id);
+			ret = -EINVAL;
+			break;
+		}
+		mutex_lock(&listener_access_lock);
+		atomic_inc(&data->ioctl_count);
+		if (cmd == QSEECOM_IOCTL_SEND_MODFD_RESP)
+			ret = qseecom_send_modfd_resp(data, argp);
+		else
+			ret = qseecom_send_modfd_resp_64(data, argp);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		mutex_unlock(&listener_access_lock);
+		if (ret)
+			pr_err("failed qseecom_send_mod_resp: %d\n", ret);
+		__qseecom_clean_data_sglistinfo(data);
+		break;
+	}
+	case QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ: {
+		if ((data->client.app_id == 0) ||
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("Open session: invalid handle (%d) appid(%d)\n",
+					data->type, data->client.app_id);
+			ret = -EINVAL;
+			break;
+		}
+		if (qseecom.qsee_version < QSEE_VERSION_40) {
+			pr_err("GP feature unsupported: qsee ver %u\n",
+				qseecom.qsee_version);
+			return -EINVAL;
+		}
+		/* Only one client allowed here at a time */
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_qteec_open_session(data, argp);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed open_session_cmd: %d\n", ret);
+		__qseecom_clean_data_sglistinfo(data);
+		break;
+	}
+	case QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: {
+		if ((data->client.app_id == 0) ||
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("Close session: invalid handle (%d) appid(%d)\n",
+					data->type, data->client.app_id);
+			ret = -EINVAL;
+			break;
+		}
+		if (qseecom.qsee_version < QSEE_VERSION_40) {
+			pr_err("GP feature unsupported: qsee ver %u\n",
+				qseecom.qsee_version);
+			return -EINVAL;
+		}
+		/* Only one client allowed here at a time */
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_qteec_close_session(data, argp);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed close_session_cmd: %d\n", ret);
+		break;
+	}
+	case QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ: {
+		if ((data->client.app_id == 0) ||
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("Invoke cmd: invalid handle (%d) appid(%d)\n",
+					data->type, data->client.app_id);
+			ret = -EINVAL;
+			break;
+		}
+		if (qseecom.qsee_version < QSEE_VERSION_40) {
+			pr_err("GP feature unsupported: qsee ver %u\n",
+				qseecom.qsee_version);
+			return -EINVAL;
+		}
+		/* Only one client allowed here at a time */
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_qteec_invoke_modfd_cmd(data, argp);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed Invoke cmd: %d\n", ret);
+		__qseecom_clean_data_sglistinfo(data);
+		break;
+	}
+	case QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: {
+		if ((data->client.app_id == 0) ||
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("Cancel req: invalid handle (%d) appid(%d)\n",
+					data->type, data->client.app_id);
+			ret = -EINVAL;
+			break;
+		}
+		if (qseecom.qsee_version < QSEE_VERSION_40) {
+			pr_err("GP feature unsupported: qsee ver %u\n",
+				qseecom.qsee_version);
+			return -EINVAL;
+		}
+		/* Only one client allowed here at a time */
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_qteec_request_cancellation(data, argp);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed request_cancellation: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_GET_CE_PIPE_INFO: {
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_get_ce_info(data, argp);
+		if (ret)
+			pr_err("failed get fde ce pipe info: %d\n", ret);
+		atomic_dec(&data->ioctl_count);
+		break;
+	}
+	case QSEECOM_IOCTL_FREE_CE_PIPE_INFO: {
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_free_ce_info(data, argp);
+		if (ret)
+			pr_err("failed free ce pipe info: %d\n", ret);
+		atomic_dec(&data->ioctl_count);
+		break;
+	}
+	case QSEECOM_IOCTL_QUERY_CE_PIPE_INFO: {
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_query_ce_info(data, argp);
+		if (ret)
+			pr_err("failed query ce pipe info: %d\n", ret);
+		atomic_dec(&data->ioctl_count);
+		break;
+	}
+	case QSEECOM_IOCTL_SET_ICE_INFO: {
+		struct qseecom_ice_data_t ice_data;
+
+		ret = copy_from_user(&ice_data, argp, sizeof(ice_data));
+		if (ret) {
+			pr_err("copy_from_user failed\n");
+			return -EFAULT;
+		}
+		qcom_ice_set_fde_flag(ice_data.flag);
+		break;
+	}
+	case QSEECOM_IOCTL_FBE_CLEAR_KEY: {
+		struct qseecom_ice_key_data_t key_data;
+
+		ret = copy_from_user(&key_data, argp, sizeof(key_data));
+		if (ret) {
+			pr_err("copy from user failed\n");
+			return -EFAULT;
+		}
+		pfk_fbe_clear_key((const unsigned char *) key_data.key,
+				key_data.key_len, (const unsigned char *)
+				key_data.salt, key_data.salt_len);
+		break;
+	}
+	default:
+		pr_err("Invalid IOCTL: 0x%x\n", cmd);
+		return -EINVAL;
+	}
+	return ret;
+}
+
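+/*
+ * Allocate a per-fd handle.  It starts life as QSEECOM_GENERIC; the first
+ * ioctl issued on the fd (register listener, load app, ...) settles its
+ * actual type.
+ */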
+static int qseecom_open(struct inode *inode, struct file *file)
+{
+	int ret = 0;
+	struct qseecom_dev_handle *data;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+	file->private_data = data;
+	data->abort = 0;
+	data->type = QSEECOM_GENERIC;
+	data->released = false;
+	memset((void *)data->client.app_name, 0, MAX_APP_NAME_SIZE);
+	data->mode = INACTIVE;
+	init_waitqueue_head(&data->abort_wq);
+	atomic_set(&data->ioctl_count, 0);
+	return ret;
+}
+
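+/*
+ * Tear down a handle on close: unregister a still-registered listener,
+ * unload a still-loaded client app or unmap the client buffer, then drop
+ * any outstanding bus-bandwidth or clock votes held by this handle.
+ */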
+static int qseecom_release(struct inode *inode, struct file *file)
+{
+	struct qseecom_dev_handle *data = file->private_data;
+	int ret = 0;
+	bool free_private_data = true;
+
+	if (!data->released) {
+		pr_debug("data: released=false, type=%d, mode=%d, data=0x%pK\n",
+			data->type, data->mode, data);
+		switch (data->type) {
+		case QSEECOM_LISTENER_SERVICE:
+			pr_debug("release lsnr svc %d\n", data->listener.id);
+			free_private_data = false;
+			mutex_lock(&listener_access_lock);
+			ret = qseecom_unregister_listener(data);
+			mutex_unlock(&listener_access_lock);
+			break;
+		case QSEECOM_CLIENT_APP:
+			mutex_lock(&app_access_lock);
+			ret = qseecom_unload_app(data, true);
+			mutex_unlock(&app_access_lock);
+			break;
+		case QSEECOM_SECURE_SERVICE:
+		case QSEECOM_GENERIC:
+			if (data->client.dmabuf)
+				qseecom_vaddr_unmap(data->client.sb_virt,
+					data->client.sgt, data->client.attach,
+					data->client.dmabuf);
+			break;
+		case QSEECOM_UNAVAILABLE_CLIENT_APP:
+			break;
+		default:
+			pr_err("Unsupported clnt_handle_type %d\n",
+				data->type);
+			break;
+		}
+	}
+
+	if (qseecom.support_bus_scaling) {
+		mutex_lock(&qsee_bw_mutex);
+		if (data->mode != INACTIVE) {
+			qseecom_unregister_bus_bandwidth_needs(data);
+			if (qseecom.cumulative_mode == INACTIVE) {
+				ret = __qseecom_set_msm_bus_request(INACTIVE);
+				if (ret)
+					pr_err("Fail to scale down bus\n");
+			}
+		}
+		mutex_unlock(&qsee_bw_mutex);
+	} else {
+		if (data->fast_load_enabled)
+			qsee_disable_clock_vote(data, CLK_SFPB);
+		if (data->perf_enabled)
+			qsee_disable_clock_vote(data, CLK_DFAB);
+	}
+
+	if (free_private_data)
+		kfree(data);
+	return ret;
+}
+
+#ifdef CONFIG_COMPAT
+#include "compat_qseecom.c"
+#else
+#define compat_qseecom_ioctl	NULL
+#endif
+
+static const struct file_operations qseecom_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = qseecom_ioctl,
+	.compat_ioctl = compat_qseecom_ioctl,
+	.open = qseecom_open,
+	.release = qseecom_release
+};
+
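+/*
+ * Acquire the core/iface/bus clocks (and optional core source clock) for
+ * the given CE hardware instance and program the core source clock to the
+ * configured OPP frequency.  With qcom,no-clock-support all clock handles
+ * are simply left NULL.
+ */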
+static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce)
+{
+	int rc = 0;
+	struct device *pdev;
+	struct qseecom_clk *qclk;
+	char *core_clk_src = NULL;
+	char *core_clk = NULL;
+	char *iface_clk = NULL;
+	char *bus_clk = NULL;
+
+	switch (ce) {
+	case CLK_QSEE: {
+		core_clk_src = "core_clk_src";
+		core_clk = "core_clk";
+		iface_clk = "iface_clk";
+		bus_clk = "bus_clk";
+		qclk = &qseecom.qsee;
+		qclk->instance = CLK_QSEE;
+		break;
+	}
+	case CLK_CE_DRV: {
+		core_clk_src = "ce_drv_core_clk_src";
+		core_clk = "ce_drv_core_clk";
+		iface_clk = "ce_drv_iface_clk";
+		bus_clk = "ce_drv_bus_clk";
+		qclk = &qseecom.ce_drv;
+		qclk->instance = CLK_CE_DRV;
+		break;
+	}
+	default:
+		pr_err("Invalid ce hw instance: %d!\n", ce);
+		return -EIO;
+	}
+
+	if (qseecom.no_clock_support) {
+		qclk->ce_core_clk = NULL;
+		qclk->ce_clk = NULL;
+		qclk->ce_bus_clk = NULL;
+		qclk->ce_core_src_clk = NULL;
+		return 0;
+	}
+
+	pdev = qseecom.pdev;
+
+	/* Get CE3 src core clk. */
+	qclk->ce_core_src_clk = clk_get(pdev, core_clk_src);
+	if (!IS_ERR(qclk->ce_core_src_clk)) {
+		rc = clk_set_rate(qclk->ce_core_src_clk,
+					qseecom.ce_opp_freq_hz);
+		if (rc) {
+			clk_put(qclk->ce_core_src_clk);
+			qclk->ce_core_src_clk = NULL;
+			pr_err("Unable to set the core src clk @%uMhz.\n",
+				qseecom.ce_opp_freq_hz/CE_CLK_DIV);
+			return -EIO;
+		}
+	} else {
+		pr_warn("Unable to get CE core src clk, set to NULL\n");
+		qclk->ce_core_src_clk = NULL;
+	}
+
+	/* Get CE core clk */
+	qclk->ce_core_clk = clk_get(pdev, core_clk);
+	if (IS_ERR(qclk->ce_core_clk)) {
+		rc = PTR_ERR(qclk->ce_core_clk);
+		pr_err("Unable to get CE core clk\n");
+		if (qclk->ce_core_src_clk != NULL)
+			clk_put(qclk->ce_core_src_clk);
+		return -EIO;
+	}
+
+	/* Get CE Interface clk */
+	qclk->ce_clk = clk_get(pdev, iface_clk);
+	if (IS_ERR(qclk->ce_clk)) {
+		rc = PTR_ERR(qclk->ce_clk);
+		pr_err("Unable to get CE interface clk\n");
+		if (qclk->ce_core_src_clk != NULL)
+			clk_put(qclk->ce_core_src_clk);
+		clk_put(qclk->ce_core_clk);
+		return -EIO;
+	}
+
+	/* Get CE AXI clk */
+	qclk->ce_bus_clk = clk_get(pdev, bus_clk);
+	if (IS_ERR(qclk->ce_bus_clk)) {
+		rc = PTR_ERR(qclk->ce_bus_clk);
+		pr_err("Unable to get CE BUS interface clk\n");
+		if (qclk->ce_core_src_clk != NULL)
+			clk_put(qclk->ce_core_src_clk);
+		clk_put(qclk->ce_core_clk);
+		clk_put(qclk->ce_clk);
+		return -EIO;
+	}
+
+	return rc;
+}
+
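+/* Release the clock handles taken by __qseecom_init_clk(). */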
+static void __qseecom_deinit_clk(enum qseecom_ce_hw_instance ce)
+{
+	struct qseecom_clk *qclk;
+
+	if (ce == CLK_QSEE)
+		qclk = &qseecom.qsee;
+	else
+		qclk = &qseecom.ce_drv;
+
+	if (qclk->ce_clk != NULL) {
+		clk_put(qclk->ce_clk);
+		qclk->ce_clk = NULL;
+	}
+	if (qclk->ce_core_clk != NULL) {
+		clk_put(qclk->ce_core_clk);
+		qclk->ce_core_clk = NULL;
+	}
+	if (qclk->ce_bus_clk != NULL) {
+		clk_put(qclk->ce_bus_clk);
+		qclk->ce_bus_clk = NULL;
+	}
+	if (qclk->ce_core_src_clk != NULL) {
+		clk_put(qclk->ce_core_src_clk);
+		qclk->ce_core_src_clk = NULL;
+	}
+	qclk->instance = CLK_INVALID;
+}
+
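+/*
+ * Parse the crypto-engine (CE) pipe configuration from device tree.
+ * New-style DTs describe FDE/PFE units via the qcom,full-disk-encrypt-info
+ * and qcom,per-file-encrypt-info tables; otherwise fall back to the legacy
+ * single disk/file-encrypt-pipe-pair properties.
+ */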
+static int qseecom_retrieve_ce_data(struct platform_device *pdev)
+{
+	int rc = 0;
+	uint32_t hlos_num_ce_hw_instances;
+	uint32_t disk_encrypt_pipe;
+	uint32_t file_encrypt_pipe;
+	uint32_t hlos_ce_hw_instance[MAX_CE_PIPE_PAIR_PER_UNIT] = {0};
+	int i;
+	const int *tbl;
+	int size;
+	int entry;
+	struct qseecom_crypto_info *pfde_tbl = NULL;
+	struct qseecom_crypto_info *p;
+	int tbl_size;
+	int j;
+	bool old_db = true;
+	struct qseecom_ce_info_use *pce_info_use;
+	uint32_t *unit_tbl = NULL;
+	int total_units = 0;
+	struct qseecom_ce_pipe_entry *pce_entry;
+
+	qseecom.ce_info.fde = qseecom.ce_info.pfe = NULL;
+	qseecom.ce_info.num_fde = qseecom.ce_info.num_pfe = 0;
+
+	if (of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,qsee-ce-hw-instance",
+				&qseecom.ce_info.qsee_ce_hw_instance)) {
+		pr_err("Fail to get qsee ce hw instance information.\n");
+		rc = -EINVAL;
+		goto out;
+	} else {
+		pr_debug("qsee-ce-hw-instance=0x%x\n",
+			qseecom.ce_info.qsee_ce_hw_instance);
+	}
+
+	qseecom.support_fde = of_property_read_bool((&pdev->dev)->of_node,
+						"qcom,support-fde");
+	qseecom.support_pfe = of_property_read_bool((&pdev->dev)->of_node,
+						"qcom,support-pfe");
+
+	if (!qseecom.support_pfe && !qseecom.support_fde) {
+		pr_warn("Device does not support PFE/FDE\n");
+		goto out;
+	}
+
+	if (qseecom.support_fde)
+		tbl = of_get_property((&pdev->dev)->of_node,
+			"qcom,full-disk-encrypt-info", &size);
+	else
+		tbl = NULL;
+	if (tbl) {
+		old_db = false;
+		if (size % sizeof(struct qseecom_crypto_info)) {
+			pr_err("full-disk-encrypt-info tbl size(%d)\n",
+				size);
+			rc = -EINVAL;
+			goto out;
+		}
+		tbl_size = size / sizeof(struct qseecom_crypto_info);
+
+		pfde_tbl = kzalloc(size, GFP_KERNEL);
+		unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
+		total_units = 0;
+
+		if (!pfde_tbl || !unit_tbl) {
+			rc = -ENOMEM;
+			goto out;
+		}
+		if (of_property_read_u32_array((&pdev->dev)->of_node,
+			"qcom,full-disk-encrypt-info",
+			(u32 *)pfde_tbl, size/sizeof(u32))) {
+			pr_err("failed to read full-disk-encrypt-info tbl\n");
+			rc = -EINVAL;
+			goto out;
+		}
+
+		for (i = 0, p = pfde_tbl;  i < tbl_size; i++, p++) {
+			for (j = 0; j < total_units; j++) {
+				if (p->unit_num == *(unit_tbl + j))
+					break;
+			}
+			if (j == total_units) {
+				*(unit_tbl + total_units) = p->unit_num;
+				total_units++;
+			}
+		}
+
+		qseecom.ce_info.num_fde = total_units;
+		pce_info_use = qseecom.ce_info.fde = kcalloc(
+			total_units, sizeof(struct qseecom_ce_info_use),
+				GFP_KERNEL);
+		if (!pce_info_use) {
+			rc = -ENOMEM;
+			goto out;
+		}
+
+		for (j = 0; j < total_units; j++, pce_info_use++) {
+			pce_info_use->unit_num = *(unit_tbl + j);
+			pce_info_use->alloc = false;
+			pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
+			pce_info_use->num_ce_pipe_entries = 0;
+			pce_info_use->ce_pipe_entry = NULL;
+			for (i = 0, p = pfde_tbl;  i < tbl_size; i++, p++) {
+				if (p->unit_num == pce_info_use->unit_num)
+					pce_info_use->num_ce_pipe_entries++;
+			}
+
+			entry = pce_info_use->num_ce_pipe_entries;
+			pce_entry = pce_info_use->ce_pipe_entry =
+				kcalloc(entry,
+					sizeof(struct qseecom_ce_pipe_entry),
+					GFP_KERNEL);
+			if (pce_entry == NULL) {
+				rc = -ENOMEM;
+				goto out;
+			}
+
+			for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
+				if (p->unit_num == pce_info_use->unit_num) {
+					pce_entry->ce_num = p->ce;
+					pce_entry->ce_pipe_pair =
+							p->pipe_pair;
+					pce_entry->valid = true;
+					pce_entry++;
+				}
+			}
+		}
+		kfree(unit_tbl);
+		unit_tbl = NULL;
+		kfree(pfde_tbl);
+		pfde_tbl = NULL;
+	}
+
+	if (qseecom.support_pfe)
+		tbl = of_get_property((&pdev->dev)->of_node,
+			"qcom,per-file-encrypt-info", &size);
+	else
+		tbl = NULL;
+	if (tbl) {
+		old_db = false;
+		if (size % sizeof(struct qseecom_crypto_info)) {
+			pr_err("per-file-encrypt-info tbl size(%d)\n",
+				size);
+			rc = -EINVAL;
+			goto out;
+		}
+		tbl_size = size / sizeof(struct qseecom_crypto_info);
+
+		pfde_tbl = kzalloc(size, GFP_KERNEL);
+		unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
+		total_units = 0;
+		if (!pfde_tbl || !unit_tbl) {
+			rc = -ENOMEM;
+			goto out;
+		}
+		if (of_property_read_u32_array((&pdev->dev)->of_node,
+			"qcom,per-file-encrypt-info",
+			(u32 *)pfde_tbl, size/sizeof(u32))) {
+			pr_err("failed to read per-file-encrypt-info tbl\n");
+			rc = -EINVAL;
+			goto out;
+		}
+
+		for (i = 0, p = pfde_tbl;  i < tbl_size; i++, p++) {
+			for (j = 0; j < total_units; j++) {
+				if (p->unit_num == *(unit_tbl + j))
+					break;
+			}
+			if (j == total_units) {
+				*(unit_tbl + total_units) = p->unit_num;
+				total_units++;
+			}
+		}
+
+		qseecom.ce_info.num_pfe = total_units;
+		pce_info_use = qseecom.ce_info.pfe = kcalloc(
+			total_units, sizeof(struct qseecom_ce_info_use),
+				GFP_KERNEL);
+		if (!pce_info_use) {
+			rc = -ENOMEM;
+			goto out;
+		}
+
+		for (j = 0; j < total_units; j++, pce_info_use++) {
+			pce_info_use->unit_num = *(unit_tbl + j);
+			pce_info_use->alloc = false;
+			pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
+			pce_info_use->num_ce_pipe_entries = 0;
+			pce_info_use->ce_pipe_entry = NULL;
+			for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
+				if (p->unit_num == pce_info_use->unit_num)
+					pce_info_use->num_ce_pipe_entries++;
+			}
+
+			entry = pce_info_use->num_ce_pipe_entries;
+			pce_entry = pce_info_use->ce_pipe_entry =
+				kcalloc(entry,
+					sizeof(struct qseecom_ce_pipe_entry),
+					GFP_KERNEL);
+			if (pce_entry == NULL) {
+				rc = -ENOMEM;
+				goto out;
+			}
+
+			for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
+				if (p->unit_num == pce_info_use->unit_num) {
+					pce_entry->ce_num = p->ce;
+					pce_entry->ce_pipe_pair =
+							p->pipe_pair;
+					pce_entry->valid = true;
+					pce_entry++;
+				}
+			}
+		}
+		kfree(unit_tbl);
+		unit_tbl = NULL;
+		kfree(pfde_tbl);
+		pfde_tbl = NULL;
+	}
+
+	if (!old_db)
+		goto out1;
+
+	if (of_property_read_bool((&pdev->dev)->of_node,
+			"qcom,support-multiple-ce-hw-instance")) {
+		if (of_property_read_u32((&pdev->dev)->of_node,
+			"qcom,hlos-num-ce-hw-instances",
+				&hlos_num_ce_hw_instances)) {
+			pr_err("Fail: get hlos number of ce hw instance\n");
+			rc = -EINVAL;
+			goto out;
+		}
+	} else {
+		hlos_num_ce_hw_instances = 1;
+	}
+
+	if (hlos_num_ce_hw_instances > MAX_CE_PIPE_PAIR_PER_UNIT) {
+		pr_err("Fail: hlos number of ce hw instance exceeds %d\n",
+			MAX_CE_PIPE_PAIR_PER_UNIT);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	if (of_property_read_u32_array((&pdev->dev)->of_node,
+			"qcom,hlos-ce-hw-instance", hlos_ce_hw_instance,
+			hlos_num_ce_hw_instances)) {
+		pr_err("Fail: get hlos ce hw instance info\n");
+		rc = -EINVAL;
+		goto out;
+	}
+
+	if (qseecom.support_fde) {
+		pce_info_use = qseecom.ce_info.fde =
+			kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
+		if (!pce_info_use) {
+			rc = -ENOMEM;
+			goto out;
+		}
+		/* by default for old db */
+		qseecom.ce_info.num_fde = DEFAULT_NUM_CE_INFO_UNIT;
+		pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
+		pce_info_use->alloc = false;
+		pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
+		pce_info_use->ce_pipe_entry = NULL;
+		if (of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,disk-encrypt-pipe-pair",
+				&disk_encrypt_pipe)) {
+			pr_err("Fail to get FDE pipe information.\n");
+			rc = -EINVAL;
+			goto out;
+		} else {
+			pr_debug("disk-encrypt-pipe-pair=0x%x\n",
+				disk_encrypt_pipe);
+		}
+		entry = pce_info_use->num_ce_pipe_entries =
+				hlos_num_ce_hw_instances;
+		pce_entry = pce_info_use->ce_pipe_entry =
+			kcalloc(entry,
+				sizeof(struct qseecom_ce_pipe_entry),
+				GFP_KERNEL);
+		if (pce_entry == NULL) {
+			rc = -ENOMEM;
+			goto out;
+		}
+		for (i = 0; i < entry; i++) {
+			pce_entry->ce_num = hlos_ce_hw_instance[i];
+			pce_entry->ce_pipe_pair = disk_encrypt_pipe;
+			pce_entry->valid = 1;
+			pce_entry++;
+		}
+	} else {
+		pr_warn("Device does not support FDE\n");
+		disk_encrypt_pipe = 0xff;
+	}
+	if (qseecom.support_pfe) {
+		pce_info_use = qseecom.ce_info.pfe =
+			kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
+		if (!pce_info_use) {
+			rc = -ENOMEM;
+			goto out;
+		}
+		/* by default for old db */
+		qseecom.ce_info.num_pfe = DEFAULT_NUM_CE_INFO_UNIT;
+		pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
+		pce_info_use->alloc = false;
+		pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
+		pce_info_use->ce_pipe_entry = NULL;
+
+		if (of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,file-encrypt-pipe-pair",
+				&file_encrypt_pipe)) {
+			pr_err("Fail to get PFE pipe information.\n");
+			rc = -EINVAL;
+			goto out;
+		} else {
+			pr_debug("file-encrypt-pipe-pair=0x%x\n",
+				file_encrypt_pipe);
+		}
+		entry = pce_info_use->num_ce_pipe_entries =
+						hlos_num_ce_hw_instances;
+		pce_entry = pce_info_use->ce_pipe_entry =
+			kcalloc(entry,
+				sizeof(struct qseecom_ce_pipe_entry),
+				GFP_KERNEL);
+		if (pce_entry == NULL) {
+			rc = -ENOMEM;
+			goto out;
+		}
+		for (i = 0; i < entry; i++) {
+			pce_entry->ce_num = hlos_ce_hw_instance[i];
+			pce_entry->ce_pipe_pair = file_encrypt_pipe;
+			pce_entry->valid = 1;
+			pce_entry++;
+		}
+	} else {
+		pr_warn("Device does not support PFE\n");
+		file_encrypt_pipe = 0xff;
+	}
+
+out1:
+	qseecom.qsee.instance = qseecom.ce_info.qsee_ce_hw_instance;
+	qseecom.ce_drv.instance = hlos_ce_hw_instance[0];
+out:
+	if (rc) {
+		if (qseecom.ce_info.fde) {
+			pce_info_use = qseecom.ce_info.fde;
+			for (i = 0; i < qseecom.ce_info.num_fde; i++) {
+				pce_entry = pce_info_use->ce_pipe_entry;
+				kfree(pce_entry);
+				pce_info_use++;
+			}
+		}
+		kfree(qseecom.ce_info.fde);
+		qseecom.ce_info.fde = NULL;
+		if (qseecom.ce_info.pfe) {
+			pce_info_use = qseecom.ce_info.pfe;
+			for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
+				pce_entry = pce_info_use->ce_pipe_entry;
+				kfree(pce_entry);
+				pce_info_use++;
+			}
+		}
+		kfree(qseecom.ce_info.pfe);
+		qseecom.ce_info.pfe = NULL;
+	}
+	kfree(unit_tbl);
+	kfree(pfde_tbl);
+	return rc;
+}
+
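+/*
+ * Hand out a CE info unit for the requested usage (FDE or PFE): reuse the
+ * unit already tied to the caller's handle if there is one, otherwise
+ * claim a free unit, and copy its pipe entries back to user space.
+ */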
+static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	struct qseecom_ce_info_req req;
+	struct qseecom_ce_info_req *pinfo = &req;
+	int ret = 0;
+	int i;
+	unsigned int entries;
+	struct qseecom_ce_info_use *pce_info_use, *p;
+	int total = 0;
+	bool found = false;
+	struct qseecom_ce_pipe_entry *pce_entry;
+
+	ret = copy_from_user(pinfo, argp,
+				sizeof(struct qseecom_ce_info_req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+
+	switch (pinfo->usage) {
+	case QSEOS_KM_USAGE_DISK_ENCRYPTION:
+	case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
+	case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
+		if (qseecom.support_fde) {
+			p = qseecom.ce_info.fde;
+			total = qseecom.ce_info.num_fde;
+		} else {
+			pr_err("system does not support fde\n");
+			return -EINVAL;
+		}
+		break;
+	case QSEOS_KM_USAGE_FILE_ENCRYPTION:
+		if (qseecom.support_pfe) {
+			p = qseecom.ce_info.pfe;
+			total = qseecom.ce_info.num_pfe;
+		} else {
+			pr_err("system does not support pfe\n");
+			return -EINVAL;
+		}
+		break;
+	default:
+		pr_err("unsupported usage %d\n", pinfo->usage);
+		return -EINVAL;
+	}
+
+	pce_info_use = NULL;
+	for (i = 0; i < total; i++) {
+		if (!p->alloc)
+			pce_info_use = p;
+		else if (!memcmp(p->handle, pinfo->handle,
+						MAX_CE_INFO_HANDLE_SIZE)) {
+			pce_info_use = p;
+			found = true;
+			break;
+		}
+		p++;
+	}
+
+	if (pce_info_use == NULL)
+		return -EBUSY;
+
+	pinfo->unit_num = pce_info_use->unit_num;
+	if (!pce_info_use->alloc) {
+		pce_info_use->alloc = true;
+		memcpy(pce_info_use->handle,
+			pinfo->handle, MAX_CE_INFO_HANDLE_SIZE);
+	}
+	if (pce_info_use->num_ce_pipe_entries >
+					MAX_CE_PIPE_PAIR_PER_UNIT)
+		entries = MAX_CE_PIPE_PAIR_PER_UNIT;
+	else
+		entries = pce_info_use->num_ce_pipe_entries;
+	pinfo->num_ce_pipe_entries = entries;
+	pce_entry = pce_info_use->ce_pipe_entry;
+	for (i = 0; i < entries; i++, pce_entry++)
+		pinfo->ce_pipe_entry[i] = *pce_entry;
+	for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
+		pinfo->ce_pipe_entry[i].valid = 0;
+
+	if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
+		pr_err("copy_to_user failed\n");
+		ret = -EFAULT;
+	}
+	return ret;
+}
+
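+/* Release the CE info unit previously claimed with the caller's handle. */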
+static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	struct qseecom_ce_info_req req;
+	struct qseecom_ce_info_req *pinfo = &req;
+	int ret = 0;
+	struct qseecom_ce_info_use *p;
+	int total = 0;
+	int i;
+	bool found = false;
+
+	ret = copy_from_user(pinfo, argp,
+				sizeof(struct qseecom_ce_info_req));
+	if (ret)
+		return ret;
+
+	switch (pinfo->usage) {
+	case QSEOS_KM_USAGE_DISK_ENCRYPTION:
+	case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
+	case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
+		if (qseecom.support_fde) {
+			p = qseecom.ce_info.fde;
+			total = qseecom.ce_info.num_fde;
+		} else {
+			pr_err("system does not support fde\n");
+			return -EINVAL;
+		}
+		break;
+	case QSEOS_KM_USAGE_FILE_ENCRYPTION:
+		if (qseecom.support_pfe) {
+			p = qseecom.ce_info.pfe;
+			total = qseecom.ce_info.num_pfe;
+		} else {
+			pr_err("system does not support pfe\n");
+			return -EINVAL;
+		}
+		break;
+	default:
+		pr_err("unsupported usage %d\n", pinfo->usage);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < total; i++) {
+		if (p->alloc &&
+			!memcmp(p->handle, pinfo->handle,
+					MAX_CE_INFO_HANDLE_SIZE)) {
+			memset(p->handle, 0, MAX_CE_INFO_HANDLE_SIZE);
+			p->alloc = false;
+			found = true;
+			break;
+		}
+		p++;
+	}
+	return ret;
+}
+
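+/*
+ * Read-only variant of qseecom_get_ce_info(): report the pipe entries of
+ * the unit already claimed with the caller's handle, without allocating
+ * anything.
+ */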
+static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	struct qseecom_ce_info_req req;
+	struct qseecom_ce_info_req *pinfo = &req;
+	int ret = 0;
+	int i;
+	unsigned int entries;
+	struct qseecom_ce_info_use *pce_info_use, *p;
+	int total = 0;
+	bool found = false;
+	struct qseecom_ce_pipe_entry *pce_entry;
+
+	ret = copy_from_user(pinfo, argp,
+				sizeof(struct qseecom_ce_info_req));
+	if (ret)
+		return ret;
+
+	switch (pinfo->usage) {
+	case QSEOS_KM_USAGE_DISK_ENCRYPTION:
+	case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
+	case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
+		if (qseecom.support_fde) {
+			p = qseecom.ce_info.fde;
+			total = qseecom.ce_info.num_fde;
+		} else {
+			pr_err("system does not support fde\n");
+			return -EINVAL;
+		}
+		break;
+	case QSEOS_KM_USAGE_FILE_ENCRYPTION:
+		if (qseecom.support_pfe) {
+			p = qseecom.ce_info.pfe;
+			total = qseecom.ce_info.num_pfe;
+		} else {
+			pr_err("system does not support pfe\n");
+			return -EINVAL;
+		}
+		break;
+	default:
+		pr_err("unsupported usage %d\n", pinfo->usage);
+		return -EINVAL;
+	}
+
+	pce_info_use = NULL;
+	pinfo->unit_num = INVALID_CE_INFO_UNIT_NUM;
+	pinfo->num_ce_pipe_entries = 0;
+	for (i = 0; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
+		pinfo->ce_pipe_entry[i].valid = 0;
+
+	for (i = 0; i < total; i++) {
+
+		if (p->alloc && !memcmp(p->handle,
+				pinfo->handle, MAX_CE_INFO_HANDLE_SIZE)) {
+			pce_info_use = p;
+			found = true;
+			break;
+		}
+		p++;
+	}
+	if (!pce_info_use)
+		goto out;
+	pinfo->unit_num = pce_info_use->unit_num;
+	if (pce_info_use->num_ce_pipe_entries >
+					MAX_CE_PIPE_PAIR_PER_UNIT)
+		entries = MAX_CE_PIPE_PAIR_PER_UNIT;
+	else
+		entries = pce_info_use->num_ce_pipe_entries;
+	pinfo->num_ce_pipe_entries = entries;
+	pce_entry = pce_info_use->ce_pipe_entry;
+	for (i = 0; i < entries; i++, pce_entry++)
+		pinfo->ce_pipe_entry[i] = *pce_entry;
+	for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
+		pinfo->ce_pipe_entry[i].valid = 0;
+out:
+	if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
+		pr_err("copy_to_user failed\n");
+		ret = -EFAULT;
+	}
+	return ret;
+}
+
+/*
+ * Check whitelist feature, and if TZ feature version is < 1.0.0,
+ * then whitelist feature is not supported.
+ */
+#define GET_FEAT_VERSION_CMD	3
+static int qseecom_check_whitelist_feature(void)
+{
+	struct scm_desc desc = {0};
+	int version = 0;
+	int ret = 0;
+
+	desc.args[0] = FEATURE_ID_WHITELIST;
+	desc.arginfo = SCM_ARGS(1);
+	ret = scm_call2(SCM_SIP_FNID(SCM_SVC_INFO,
+		GET_FEAT_VERSION_CMD), &desc);
+	if (!ret)
+		version = desc.ret[0];
+
+	return version >= MAKE_WHITELIST_VERSION(1, 0, 0);
+}
+
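+/*
+ * Platform probe: create the qseecom character device, parse the CE and
+ * feature configuration from device tree, set up the CE clocks, query the
+ * QSEE version and, when needed, notify TZ of the secure app region.
+ */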
+static int qseecom_probe(struct platform_device *pdev)
+{
+	int rc;
+	int i;
+	uint32_t feature = 10;
+	struct device *class_dev;
+	struct qseecom_command_scm_resp resp;
+	struct qseecom_ce_info_use *pce_info_use = NULL;
+
+	qseecom.qsee_bw_count = 0;
+	qseecom.qsee_perf_client = 0;
+	qseecom.qsee_sfpb_bw_count = 0;
+
+	qseecom.qsee.ce_core_clk = NULL;
+	qseecom.qsee.ce_clk = NULL;
+	qseecom.qsee.ce_core_src_clk = NULL;
+	qseecom.qsee.ce_bus_clk = NULL;
+
+	qseecom.cumulative_mode = 0;
+	qseecom.current_mode = INACTIVE;
+	qseecom.support_bus_scaling = false;
+	qseecom.support_fde = false;
+	qseecom.support_pfe = false;
+
+	qseecom.ce_drv.ce_core_clk = NULL;
+	qseecom.ce_drv.ce_clk = NULL;
+	qseecom.ce_drv.ce_core_src_clk = NULL;
+	qseecom.ce_drv.ce_bus_clk = NULL;
+	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
+
+	qseecom.app_block_ref_cnt = 0;
+	init_waitqueue_head(&qseecom.app_block_wq);
+	qseecom.whitelist_support = true;
+
+	rc = alloc_chrdev_region(&qseecom_device_no, 0, 1, QSEECOM_DEV);
+	if (rc < 0) {
+		pr_err("alloc_chrdev_region failed %d\n", rc);
+		return rc;
+	}
+
+	driver_class = class_create(THIS_MODULE, QSEECOM_DEV);
+	if (IS_ERR(driver_class)) {
+		rc = -ENOMEM;
+		pr_err("class_create failed %d\n", rc);
+		goto exit_unreg_chrdev_region;
+	}
+
+	class_dev = device_create(driver_class, NULL, qseecom_device_no, NULL,
+			QSEECOM_DEV);
+	if (IS_ERR(class_dev)) {
+		rc = -ENOMEM;
+		pr_err("class_device_create failed %d\n", rc);
+		goto exit_destroy_class;
+	}
+
+	cdev_init(&qseecom.cdev, &qseecom_fops);
+	qseecom.cdev.owner = THIS_MODULE;
+
+	rc = cdev_add(&qseecom.cdev, MKDEV(MAJOR(qseecom_device_no), 0), 1);
+	if (rc < 0) {
+		pr_err("cdev_add failed %d\n", rc);
+		goto exit_destroy_device;
+	}
+
+	INIT_LIST_HEAD(&qseecom.registered_listener_list_head);
+	INIT_LIST_HEAD(&qseecom.registered_app_list_head);
+	spin_lock_init(&qseecom.registered_app_list_lock);
+	INIT_LIST_HEAD(&qseecom.unregister_lsnr_pending_list_head);
+	INIT_LIST_HEAD(&qseecom.registered_kclient_list_head);
+	spin_lock_init(&qseecom.registered_kclient_list_lock);
+	init_waitqueue_head(&qseecom.send_resp_wq);
+	init_waitqueue_head(&qseecom.register_lsnr_pending_wq);
+	qseecom.send_resp_flag = 0;
+
+	qseecom.qsee_version = QSEEE_VERSION_00;
+	rc = qseecom_scm_call(6, 3, &feature, sizeof(feature),
+		&resp, sizeof(resp));
+	if (rc) {
+		pr_err("Failed to get QSEE version info %d\n", rc);
+		goto exit_del_cdev;
+	}
+	pr_info("qseecom.qsee_version = 0x%x\n", resp.result);
+	qseecom.qsee_version = resp.result;
+	qseecom.qseos_version = QSEOS_VERSION_14;
+	qseecom.commonlib_loaded = false;
+	qseecom.commonlib64_loaded = false;
+	qseecom.pdev = class_dev;
+	qseecom.dev = &pdev->dev;
+
+	rc = dma_set_mask(qseecom.dev, DMA_BIT_MASK(64));
+	if (rc) {
+		pr_err("qseecom failed to set dma mask %d\n", rc);
+		goto exit_del_cdev;
+	}
+
+	/* register client for bus scaling */
+	if (pdev->dev.of_node) {
+		qseecom.pdev->of_node = pdev->dev.of_node;
+		qseecom.support_bus_scaling =
+				of_property_read_bool((&pdev->dev)->of_node,
+						"qcom,support-bus-scaling");
+		rc = qseecom_retrieve_ce_data(pdev);
+		if (rc)
+			goto exit_destroy_ion_client;
+		qseecom.appsbl_qseecom_support =
+				of_property_read_bool((&pdev->dev)->of_node,
+						"qcom,appsbl-qseecom-support");
+		pr_debug("qseecom.appsbl_qseecom_support = 0x%x\n",
+				qseecom.appsbl_qseecom_support);
+
+		qseecom.commonlib64_loaded =
+				of_property_read_bool((&pdev->dev)->of_node,
+						"qcom,commonlib64-loaded-by-uefi");
+		pr_debug("qseecom.commonlib64-loaded-by-uefi = 0x%x\n",
+				qseecom.commonlib64_loaded);
+		qseecom.fde_key_size =
+			of_property_read_bool((&pdev->dev)->of_node,
+						"qcom,fde-key-size");
+		qseecom.no_clock_support =
+				of_property_read_bool((&pdev->dev)->of_node,
+						"qcom,no-clock-support");
+		if (qseecom.no_clock_support) {
+			pr_info("qseecom clocks handled by other subsystem\n");
+		} else {
+			pr_info("no-clock-support=0x%x\n",
+				qseecom.no_clock_support);
+		}
+
+		if (of_property_read_u32((&pdev->dev)->of_node,
+					"qcom,qsee-reentrancy-support",
+					&qseecom.qsee_reentrancy_support)) {
+			pr_warn("qsee reentrancy support phase is not defined, setting to default 0\n");
+			qseecom.qsee_reentrancy_support = 0;
+		} else {
+			pr_warn("qseecom.qsee_reentrancy_support = %d\n",
+				qseecom.qsee_reentrancy_support);
+		}
+
+		qseecom.enable_key_wrap_in_ks =
+			of_property_read_bool((&pdev->dev)->of_node,
+					"qcom,enable-key-wrap-in-ks");
+		if (qseecom.enable_key_wrap_in_ks) {
+			pr_warn("qseecom.enable_key_wrap_in_ks = %d\n",
+					qseecom.enable_key_wrap_in_ks);
+		}
+
+		/*
+		 * The qseecom bus scaling flag cannot be enabled when the
+		 * crypto clock is not handled by HLOS.
+		 */
+		if (qseecom.no_clock_support && qseecom.support_bus_scaling) {
+			pr_err("support_bus_scaling flag can not be enabled.\n");
+			rc = -EINVAL;
+			goto exit_destroy_ion_client;
+		}
+
+		if (of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,ce-opp-freq",
+				&qseecom.ce_opp_freq_hz)) {
+			pr_debug("CE operating frequency is not defined, setting to default 100MHZ\n");
+			qseecom.ce_opp_freq_hz = QSEE_CE_CLK_100MHZ;
+		}
+		rc = __qseecom_init_clk(CLK_QSEE);
+		if (rc)
+			goto exit_destroy_ion_client;
+
+		if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
+				(qseecom.support_pfe || qseecom.support_fde)) {
+			rc = __qseecom_init_clk(CLK_CE_DRV);
+			if (rc) {
+				__qseecom_deinit_clk(CLK_QSEE);
+				goto exit_destroy_ion_client;
+			}
+		} else {
+			struct qseecom_clk *qclk;
+
+			qclk = &qseecom.qsee;
+			qseecom.ce_drv.ce_core_clk = qclk->ce_core_clk;
+			qseecom.ce_drv.ce_clk = qclk->ce_clk;
+			qseecom.ce_drv.ce_core_src_clk = qclk->ce_core_src_clk;
+			qseecom.ce_drv.ce_bus_clk = qclk->ce_bus_clk;
+		}
+
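+		/*
+		 * Notify the secure OS of the HLOS-reserved "secapp-region",
+		 * where secure applications are loaded, unless the region is
+		 * already protected or appsbl-based qseecom support handled
+		 * this in the bootloader.
+		 */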
+		if (qseecom.qsee_version >= (QSEE_VERSION_02) &&
+			(!qseecom.is_apps_region_protected &&
+			!qseecom.appsbl_qseecom_support)) {
+			struct resource *resource = NULL;
+			struct qsee_apps_region_info_ireq req;
+			struct qsee_apps_region_info_64bit_ireq req_64bit;
+			struct qseecom_command_scm_resp resp;
+			void *cmd_buf = NULL;
+			size_t cmd_len;
+
+			resource = platform_get_resource_byname(pdev,
+					IORESOURCE_MEM, "secapp-region");
+			if (resource) {
+				if (qseecom.qsee_version < QSEE_VERSION_40) {
+					req.qsee_cmd_id =
+						QSEOS_APP_REGION_NOTIFICATION;
+					req.addr = (uint32_t)resource->start;
+					req.size = resource_size(resource);
+					cmd_buf = (void *)&req;
+					cmd_len = sizeof(struct
+						qsee_apps_region_info_ireq);
+					pr_warn("secure app region addr=0x%x size=0x%x\n",
+							req.addr, req.size);
+				} else {
+					req_64bit.qsee_cmd_id =
+						QSEOS_APP_REGION_NOTIFICATION;
+					req_64bit.addr = resource->start;
+					req_64bit.size = resource_size(
+							resource);
+					cmd_buf = (void *)&req_64bit;
+					cmd_len = sizeof(struct
+					qsee_apps_region_info_64bit_ireq);
+					pr_warn("secure app region addr=0x%llx size=0x%x\n",
+						req_64bit.addr, req_64bit.size);
+				}
+			} else {
+				pr_err("Fail to get secure app region info\n");
+				rc = -EINVAL;
+				goto exit_deinit_clock;
+			}
+			rc = __qseecom_enable_clk(CLK_QSEE);
+			if (rc) {
+				pr_err("CLK_QSEE enabling failed (%d)\n", rc);
+				rc = -EIO;
+				goto exit_deinit_clock;
+			}
+			rc = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+					cmd_buf, cmd_len,
+					&resp, sizeof(resp));
+			__qseecom_disable_clk(CLK_QSEE);
+			if (rc || (resp.result != QSEOS_RESULT_SUCCESS)) {
+				pr_err("send secapp reg fail %d resp.res %d\n",
+							rc, resp.result);
+				rc = -EINVAL;
+				goto exit_deinit_clock;
+			}
+		}
+		/*
+		 * By default, appsbl only loads cmnlib. If an OEM changes
+		 * appsbl to also load cmnlib64 while the cmnlib64 image is
+		 * not present in non_hlos.bin, set
+		 * "qseecom.commonlib64_loaded = true" here as well.
+		 */
+		if (qseecom.is_apps_region_protected ||
+					qseecom.appsbl_qseecom_support)
+			qseecom.commonlib_loaded = true;
+	}
+
+	if (qseecom.support_bus_scaling) {
+		//init_timer(&(qseecom.bw_scale_down_timer));
+		INIT_WORK(&qseecom.bw_inactive_req_ws,
+					qseecom_bw_inactive_req_work);
+		/*qseecom.bw_scale_down_timer.function =
+		 *		qseecom_scale_bus_bandwidth_timer_callback;
+		 */
+	}
+	qseecom.timer_running = false;
+
+	qseecom.whitelist_support = qseecom_check_whitelist_feature();
+	pr_warn("qseecom.whitelist_support = %d\n",
+				qseecom.whitelist_support);
+
+	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
+	return 0;
+
+exit_deinit_clock:
+	__qseecom_deinit_clk(CLK_QSEE);
+	if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
+		(qseecom.support_pfe || qseecom.support_fde))
+		__qseecom_deinit_clk(CLK_CE_DRV);
+exit_destroy_ion_client:
+	if (qseecom.ce_info.fde) {
+		pce_info_use = qseecom.ce_info.fde;
+		for (i = 0; i < qseecom.ce_info.num_fde; i++) {
+			kzfree(pce_info_use->ce_pipe_entry);
+			pce_info_use++;
+		}
+		kfree(qseecom.ce_info.fde);
+	}
+	if (qseecom.ce_info.pfe) {
+		pce_info_use = qseecom.ce_info.pfe;
+		for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
+			kzfree(pce_info_use->ce_pipe_entry);
+			pce_info_use++;
+		}
+		kfree(qseecom.ce_info.pfe);
+	}
+exit_del_cdev:
+	cdev_del(&qseecom.cdev);
+exit_destroy_device:
+	device_destroy(driver_class, qseecom_device_no);
+exit_destroy_class:
+	class_destroy(driver_class);
+exit_unreg_chrdev_region:
+	unregister_chrdev_region(qseecom_device_no, 1);
+	return rc;
+}
+
+static int qseecom_remove(struct platform_device *pdev)
+{
+	struct qseecom_registered_kclient_list *kclient = NULL;
+	struct qseecom_registered_kclient_list *kclient_tmp = NULL;
+	unsigned long flags = 0;
+	int ret = 0;
+	int i;
+	struct qseecom_ce_pipe_entry *pce_entry;
+	struct qseecom_ce_info_use *pce_info_use;
+
+	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
+	spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
+
+	list_for_each_entry_safe(kclient, kclient_tmp,
+		&qseecom.registered_kclient_list_head, list) {
+
+		/* Break the loop if client handle is NULL */
+		if (!kclient->handle) {
+			list_del(&kclient->list);
+			kzfree(kclient);
+			break;
+		}
+
+		list_del(&kclient->list);
+		mutex_lock(&app_access_lock);
+		ret = qseecom_unload_app(kclient->handle->dev, false);
+		mutex_unlock(&app_access_lock);
+		if (!ret) {
+			kzfree(kclient->handle->dev);
+			kzfree(kclient->handle);
+			kzfree(kclient);
+		}
+	}
+
+	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
+
+	if (qseecom.qseos_version > QSEEE_VERSION_00)
+		qseecom_unload_commonlib_image();
+
+	if (qseecom.qsee_perf_client)
+		msm_bus_scale_client_update_request(qseecom.qsee_perf_client,
+									0);
+	if (pdev->dev.platform_data != NULL)
+		msm_bus_scale_unregister_client(qseecom.qsee_perf_client);
+
+	if (qseecom.support_bus_scaling) {
+		cancel_work_sync(&qseecom.bw_inactive_req_ws);
+		del_timer_sync(&qseecom.bw_scale_down_timer);
+	}
+
+	if (qseecom.ce_info.fde) {
+		pce_info_use = qseecom.ce_info.fde;
+		for (i = 0; i < qseecom.ce_info.num_fde; i++) {
+			pce_entry = pce_info_use->ce_pipe_entry;
+			kfree(pce_entry);
+			pce_info_use++;
+		}
+	}
+	kfree(qseecom.ce_info.fde);
+	if (qseecom.ce_info.pfe) {
+		pce_info_use = qseecom.ce_info.pfe;
+		for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
+			pce_entry = pce_info_use->ce_pipe_entry;
+			kfree(pce_entry);
+			pce_info_use++;
+		}
+	}
+	kfree(qseecom.ce_info.pfe);
+
+	/* De-initialize clocks that were acquired during probe */
+	if (pdev->dev.of_node) {
+		__qseecom_deinit_clk(CLK_QSEE);
+		if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
+				(qseecom.support_pfe || qseecom.support_fde))
+			__qseecom_deinit_clk(CLK_CE_DRV);
+	}
+
+	cdev_del(&qseecom.cdev);
+
+	device_destroy(driver_class, qseecom_device_no);
+
+	class_destroy(driver_class);
+
+	unregister_chrdev_region(qseecom_device_no, 1);
+
+	return ret;
+}
+
+static int qseecom_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	int ret = 0;
+	struct qseecom_clk *qclk;
+
+	qclk = &qseecom.qsee;
+	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_SUSPEND);
+	if (qseecom.no_clock_support)
+		return 0;
+
+	mutex_lock(&qsee_bw_mutex);
+	mutex_lock(&clk_access_lock);
+
+	if (qseecom.current_mode != INACTIVE) {
+		ret = msm_bus_scale_client_update_request(
+			qseecom.qsee_perf_client, INACTIVE);
+		if (ret)
+			pr_err("Fail to scale down bus\n");
+		else
+			qseecom.current_mode = INACTIVE;
+	}
+
+	if (qclk->clk_access_cnt) {
+		if (qclk->ce_clk != NULL)
+			clk_disable_unprepare(qclk->ce_clk);
+		if (qclk->ce_core_clk != NULL)
+			clk_disable_unprepare(qclk->ce_core_clk);
+		if (qclk->ce_bus_clk != NULL)
+			clk_disable_unprepare(qclk->ce_bus_clk);
+	}
+
+	del_timer_sync(&(qseecom.bw_scale_down_timer));
+	qseecom.timer_running = false;
+
+	mutex_unlock(&clk_access_lock);
+	mutex_unlock(&qsee_bw_mutex);
+	cancel_work_sync(&qseecom.bw_inactive_req_ws);
+
+	return 0;
+}
+
+static int qseecom_resume(struct platform_device *pdev)
+{
+	int mode = 0;
+	int ret = 0;
+	struct qseecom_clk *qclk;
+
+	qclk = &qseecom.qsee;
+	if (qseecom.no_clock_support)
+		goto exit;
+
+	mutex_lock(&qsee_bw_mutex);
+	mutex_lock(&clk_access_lock);
+	if (qseecom.cumulative_mode >= HIGH)
+		mode = HIGH;
+	else
+		mode = qseecom.cumulative_mode;
+
+	if (qseecom.cumulative_mode != INACTIVE) {
+		ret = msm_bus_scale_client_update_request(
+			qseecom.qsee_perf_client, mode);
+		if (ret)
+			pr_err("Fail to scale up bus to %d\n", mode);
+		else
+			qseecom.current_mode = mode;
+	}
+
+	if (qclk->clk_access_cnt) {
+		if (qclk->ce_core_clk != NULL) {
+			ret = clk_prepare_enable(qclk->ce_core_clk);
+			if (ret) {
+				pr_err("Unable to enable/prep CE core clk\n");
+				qclk->clk_access_cnt = 0;
+				goto err;
+			}
+		}
+		if (qclk->ce_clk != NULL) {
+			ret = clk_prepare_enable(qclk->ce_clk);
+			if (ret) {
+				pr_err("Unable to enable/prep CE iface clk\n");
+				qclk->clk_access_cnt = 0;
+				goto ce_clk_err;
+			}
+		}
+		if (qclk->ce_bus_clk != NULL) {
+			ret = clk_prepare_enable(qclk->ce_bus_clk);
+			if (ret) {
+				pr_err("Unable to enable/prep CE bus clk\n");
+				qclk->clk_access_cnt = 0;
+				goto ce_bus_clk_err;
+			}
+		}
+	}
+
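+	/*
+	 * Re-arm the bandwidth scale-down timer if crypto clocks or bus
+	 * bandwidth votes are still active after resume.
+	 */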
+	if (qclk->clk_access_cnt || qseecom.cumulative_mode) {
+		qseecom.bw_scale_down_timer.expires = jiffies +
+			msecs_to_jiffies(QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
+		mod_timer(&(qseecom.bw_scale_down_timer),
+				qseecom.bw_scale_down_timer.expires);
+		qseecom.timer_running = true;
+	}
+
+	mutex_unlock(&clk_access_lock);
+	mutex_unlock(&qsee_bw_mutex);
+	goto exit;
+
+ce_bus_clk_err:
+	if (qclk->ce_clk)
+		clk_disable_unprepare(qclk->ce_clk);
+ce_clk_err:
+	if (qclk->ce_core_clk)
+		clk_disable_unprepare(qclk->ce_core_clk);
+err:
+	mutex_unlock(&clk_access_lock);
+	mutex_unlock(&qsee_bw_mutex);
+	ret = -EIO;
+exit:
+	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
+	return ret;
+}
+
+static const struct of_device_id qseecom_match[] = {
+	{
+		.compatible = "qcom,qseecom",
+	},
+	{}
+};
+
+static struct platform_driver qseecom_plat_driver = {
+	.probe = qseecom_probe,
+	.remove = qseecom_remove,
+	.suspend = qseecom_suspend,
+	.resume = qseecom_resume,
+	.driver = {
+		.name = "qseecom",
+		.of_match_table = qseecom_match,
+	},
+};
+
+static int qseecom_init(void)
+{
+	return platform_driver_register(&qseecom_plat_driver);
+}
+
+static void qseecom_exit(void)
+{
+	platform_driver_unregister(&qseecom_plat_driver);
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QTI Secure Execution Environment Communicator");
+
+module_init(qseecom_init);
+module_exit(qseecom_exit);
diff --git a/drivers/misc/qseecom_kernel.h b/drivers/misc/qseecom_kernel.h
new file mode 100644
index 0000000..215a4e1
--- /dev/null
+++ b/drivers/misc/qseecom_kernel.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __QSEECOM_KERNEL_H_
+#define __QSEECOM_KERNEL_H_
+
+#include <linux/types.h>
+#include <soc/qcom/scm.h>
+
+#define QSEECOM_ALIGN_SIZE	0x40
+#define QSEECOM_ALIGN_MASK	(QSEECOM_ALIGN_SIZE - 1)
+#define QSEECOM_ALIGN(x)	\
+	(((x) + QSEECOM_ALIGN_MASK) & (~QSEECOM_ALIGN_MASK))
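+/*
+ * For example, with the 0x40-byte alignment above, QSEECOM_ALIGN(0x41)
+ * evaluates to 0x80 and QSEECOM_ALIGN(0x40) stays 0x40.
+ */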
+
+/*
+ * struct qseecom_handle -
+ *      Handle to the qseecom device for kernel clients
+ * @dev - qseecom device handle (opaque to clients)
+ * @sbuf - shared buffer pointer
+ * @sbuf_len - shared buffer size
+ */
+struct qseecom_handle {
+	void *dev; /* in/out */
+	unsigned char *sbuf; /* in/out */
+	uint32_t sbuf_len; /* in/out */
+};
+
+int qseecom_start_app(struct qseecom_handle **handle,
+						char *app_name, uint32_t size);
+int qseecom_shutdown_app(struct qseecom_handle **handle);
+int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
+			uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len);
+int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high);
+int qseecom_process_listener_from_smcinvoke(struct scm_desc *desc);
+
+#endif /* __QSEECOM_KERNEL_H_ */
diff --git a/drivers/net/wireless/cnss2/qmi.c b/drivers/net/wireless/cnss2/qmi.c
index 3118a50e..6f9f5ae 100644
--- a/drivers/net/wireless/cnss2/qmi.c
+++ b/drivers/net/wireless/cnss2/qmi.c
@@ -12,7 +12,7 @@
 
 #define WLFW_SERVICE_INS_ID_V01		1
 #define WLFW_CLIENT_ID			0x4b4e454c
-#define MAX_BDF_FILE_NAME		11
+#define MAX_BDF_FILE_NAME		13
 #define ELF_BDF_FILE_NAME		"bdwlan.elf"
 #define ELF_BDF_FILE_NAME_PREFIX	"bdwlan.e"
 #define BIN_BDF_FILE_NAME		"bdwlan.bin"
@@ -424,18 +424,26 @@
 	case CNSS_BDF_ELF:
 		if (plat_priv->board_info.board_id == 0xFF)
 			snprintf(filename, sizeof(filename), ELF_BDF_FILE_NAME);
-		else
+		else if (plat_priv->board_info.board_id < 0xFF)
 			snprintf(filename, sizeof(filename),
 				 ELF_BDF_FILE_NAME_PREFIX "%02x",
 				 plat_priv->board_info.board_id);
+		else
+			snprintf(filename, sizeof(filename),
+				 ELF_BDF_FILE_NAME_PREFIX "%04x",
+				 plat_priv->board_info.board_id);
 		break;
 	case CNSS_BDF_BIN:
 		if (plat_priv->board_info.board_id == 0xFF)
 			snprintf(filename, sizeof(filename), BIN_BDF_FILE_NAME);
-		else
+		else if (plat_priv->board_info.board_id < 0xFF)
 			snprintf(filename, sizeof(filename),
 				 BIN_BDF_FILE_NAME_PREFIX "%02x",
 				 plat_priv->board_info.board_id);
+		else
+			snprintf(filename, sizeof(filename),
+				 BIN_BDF_FILE_NAME_PREFIX "%04x",
+				 plat_priv->board_info.board_id);
 		break;
 	case CNSS_BDF_DUMMY:
 		cnss_pr_dbg("CNSS_BDF_DUMMY is set, sending dummy BDF\n");
diff --git a/drivers/soc/qcom/mem-offline.c b/drivers/soc/qcom/mem-offline.c
index 91c70d7..02fdf01 100644
--- a/drivers/soc/qcom/mem-offline.c
+++ b/drivers/soc/qcom/mem-offline.c
@@ -188,8 +188,8 @@
 	start_section_nr = pfn_to_section_nr(memblock_end_pfn);
 	end_section_nr = pfn_to_section_nr(ram_end_pfn);
 
-	if (start_section_nr == end_section_nr) {
-		pr_err("mem-offline: System booted with no zone movable memory blocks. Cannot perform memory offlining\n");
+	if (start_section_nr >= end_section_nr) {
+		pr_info("mem-offline: System booted with no zone movable memory blocks. Cannot perform memory offlining\n");
 		return -EINVAL;
 	}
 	for (memblock = start_section_nr; memblock <= end_section_nr;
@@ -361,10 +361,16 @@
 
 static int mem_offline_driver_probe(struct platform_device *pdev)
 {
+	int ret;
+
 	if (mem_parse_dt(pdev))
 		return -ENODEV;
 
-	if (mem_online_remaining_blocks())
+	ret = mem_online_remaining_blocks();
+	if (ret < 0)
+		return -ENODEV;
+
+	if (ret > 0)
 		pr_err("mem-offline: !!ERROR!! Auto onlining some memory blocks failed. System could run with less RAM\n");
 
 	if (mem_sysfs_init())
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index 0840d27..ccb5ee1 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -41,6 +41,9 @@
 	  If unsure, say Y, or else you won't be able to do much with your new
 	  shiny Linux system :-)
 
+config TTY_FLUSH_LOCAL_ECHO
+	bool
+
 config CONSOLE_TRANSLATIONS
 	depends on VT
 	default y
@@ -441,4 +444,21 @@
 	depends on SUN_LDOMS
 	help
 	  Support for Sun logical domain consoles.
+
+config OKL4_VTTY
+	bool "Virtual TTY on the OKL4 Microvisor"
+	depends on OKL4_GUEST
+	select TTY_FLUSH_LOCAL_ECHO
+	default y
+	help
+	  This device provides character-level read/write access to the
+	  virtual console, which is usually connected to a serial server
+	  that multiplexes output onto a physical UART.
+
+config OKL4_VTTY_CONSOLE
+	bool "Console on OKL4 VTTY"
+	depends on OKL4_VTTY
+	default y
+	help
+	  Console support for OKL4 Microvisor virtual ttys.
 endif # TTY
diff --git a/drivers/tty/Makefile b/drivers/tty/Makefile
index c72cafd..0320350 100644
--- a/drivers/tty/Makefile
+++ b/drivers/tty/Makefile
@@ -35,3 +35,4 @@
 obj-$(CONFIG_VCC)		+= vcc.o
 
 obj-y += ipwireless/
+obj-$(CONFIG_OKL4_VTTY)		+= okl4_vtty.o
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 3ad4602..fd031c1 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -126,6 +126,10 @@
 
 #define MASK(x) ((x) & (N_TTY_BUF_SIZE - 1))
 
+#if defined(CONFIG_TTY_FLUSH_LOCAL_ECHO)
+static void continue_process_echoes(struct work_struct *work);
+#endif
+
 static inline size_t read_cnt(struct n_tty_data *ldata)
 {
 	return ldata->read_head - ldata->read_tail;
@@ -762,7 +766,17 @@
 			tail++;
 	}
 
- not_yet_stored:
+#if defined(CONFIG_TTY_FLUSH_LOCAL_ECHO)
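+	/*
+	 * If not all committed echoes could be stored yet, schedule delayed
+	 * work to retry processing the echo buffer shortly.
+	 */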
+	if (ldata->echo_commit != tail) {
+		if (!tty->delayed_work) {
+			INIT_DELAYED_WORK(&tty->echo_delayed_work,
+					  continue_process_echoes);
+			schedule_delayed_work(&tty->echo_delayed_work, 1);
+		}
+		tty->delayed_work = 1;
+	}
+#endif
+
+not_yet_stored:
 	ldata->echo_tail = tail;
 	return old_space - space;
 }
@@ -828,6 +842,20 @@
 	mutex_unlock(&ldata->output_lock);
 }
 
+#if defined(CONFIG_TTY_FLUSH_LOCAL_ECHO)
+static void continue_process_echoes(struct work_struct *work)
+{
+	struct tty_struct *tty =
+		container_of(work, struct tty_struct, echo_delayed_work.work);
+	struct n_tty_data *ldata = tty->disc_data;
+
+	mutex_lock(&ldata->output_lock);
+	tty->delayed_work = 0;
+	__process_echoes(tty);
+	mutex_unlock(&ldata->output_lock);
+}
+#endif
+
 /**
  *	add_echo_byte	-	add a byte to the echo buffer
  *	@c: unicode byte to echo
diff --git a/drivers/tty/okl4_vtty.c b/drivers/tty/okl4_vtty.c
new file mode 100644
index 0000000..aa9798d
--- /dev/null
+++ b/drivers/tty/okl4_vtty.c
@@ -0,0 +1,869 @@
+/*
+ * drivers/tty/okl4_vtty.c
+ *
+ * Copyright (c) 2012-2014 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ * Copyright (c) 2014-2017 Cog Systems Pty Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * OKL4 Microvisor Virtual TTY driver.
+ *
+ * Clients using this driver must have vclient names of the form
+ * "vtty%d", where %d is the tty number, which must be
+ * unique and less than MAX_VTTYS.
+ */
+
+/* #define DEBUG 1 */
+/* #define VERBOSE_DEBUG 1 */
+
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <linux/of.h>
+#include <linux/uaccess.h>
+#include <linux/seq_file.h>
+#include <clocksource/arm_arch_timer.h>
+#include <asm-generic/okl4_virq.h>
+
+#include <microvisor/microvisor.h>
+#if 0
+#include <asm/okl4-microvisor/okl4tags.h>
+#include <asm/okl4-microvisor/microvisor_bus.h>
+#include <asm/okl4-microvisor/virq.h>
+#endif
+
+#define DRIVER_NAME "okl4-vtty"
+#define DEVICE_NAME "vtty"
+#define DEVICE_PREFIX "ttyV"
+
+/* FIXME: Jira ticket SDK-138 - philipd. */
+#define MAX_VTTYS 8
+#define MAX_MSG_SIZE 32
+
+struct vtty_port {
+	bool exists;
+	int vtty_id;
+
+	bool read_throttled, write_full, irq_registered;
+	struct work_struct read_work;
+	spinlock_t write_lock;
+
+	/*
+	 * Buffer length is max_msg_size plus one u32, which encodes the
+	 * message length.
+	 */
+	char *read_buf;
+	int read_buf_pos, read_buf_len;
+	char *write_buf;
+	int write_buffered;
+	size_t max_msg_size;
+
+	okl4_kcap_t pipe_tx_kcap;
+	okl4_kcap_t pipe_rx_kcap;
+	int tx_irq;
+	int rx_irq;
+
+#ifdef CONFIG_OKL4_VTTY_CONSOLE
+	struct console console;
+#endif
+
+	struct device *dev;
+	struct tty_port port;
+};
+
+static struct workqueue_struct *read_workqueue;
+
+static struct vtty_port ports[MAX_VTTYS];
+
+static void
+vtty_read_irq(struct vtty_port *port)
+{
+	queue_work(read_workqueue, &port->read_work);
+}
+
+static int
+do_pipe_write(struct vtty_port *port, int count)
+{
+	okl4_error_t ret;
+	int send;
+
+	if (port->write_full)
+		return 0;
+
+	BUG_ON(count > port->max_msg_size);
+
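+	/*
+	 * Frame the message: the leading u32 holds the payload length and the
+	 * total transfer is rounded up to a whole number of u32 words.
+	 */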
+	*(u32 *)port->write_buf = count;
+	send = roundup(count + sizeof(u32), sizeof(u32));
+
+	ret = _okl4_sys_pipe_send(port->pipe_tx_kcap, send,
+			(void *)port->write_buf);
+
+	if (ret == OKL4_ERROR_PIPE_NOT_READY) {
+		okl4_pipe_control_t x = 0;
+
+		okl4_pipe_control_setdoop(&x, true);
+		okl4_pipe_control_setoperation(&x,
+			OKL4_PIPE_CONTROL_OP_SET_TX_READY);
+		_okl4_sys_pipe_control(port->pipe_tx_kcap, x);
+
+		ret = _okl4_sys_pipe_send(port->pipe_tx_kcap, send,
+				(void *)port->write_buf);
+	}
+
+	if (ret == OKL4_ERROR_PIPE_FULL ||
+			ret == OKL4_ERROR_PIPE_NOT_READY) {
+		port->write_full = true;
+		return 0;
+	}
+
+	if (ret != OKL4_OK)
+		return -EIO;
+
+	return count;
+}
+
+static void
+vtty_write_irq(struct vtty_port *port)
+{
+	struct tty_struct *tty = tty_port_tty_get(&port->port);
+
+	spin_lock(&port->write_lock);
+
+	port->write_full = false;
+
+	if (port->write_buffered &&
+			do_pipe_write(port, port->write_buffered) > 0)
+		port->write_buffered = 0;
+
+	if (tty)
+		tty_wakeup(tty);
+
+	spin_unlock(&port->write_lock);
+
+	tty_kref_put(tty);
+}
+
+static irqreturn_t
+vtty_tx_irq(int irq, void *dev)
+{
+	struct vtty_port *port = dev;
+	okl4_pipe_state_t payload = okl4_get_virq_payload(irq);
+
+	if (okl4_pipe_state_gettxavailable(&payload))
+		vtty_write_irq(port);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t
+vtty_rx_irq(int irq, void *dev)
+{
+	struct vtty_port *port = dev;
+	okl4_pipe_state_t payload = okl4_get_virq_payload(irq);
+
+	if (okl4_pipe_state_getrxavailable(&payload))
+		vtty_read_irq(port);
+
+	return IRQ_HANDLED;
+}
+
+static int
+vtty_install(struct tty_driver *driver, struct tty_struct *tty)
+{
+	int port_num = tty->index;
+	struct vtty_port *port;
+	int status;
+
+	if (port_num < 0 || port_num >= MAX_VTTYS)
+		return -ENXIO;
+
+	port = &ports[port_num];
+	if (!port->exists)
+		return -ENODEV;
+
+	tty->driver_data = port;
+
+	port->write_full = false;
+	port->read_throttled = false;
+	port->write_buffered = 0;
+
+	/*
+	 * low_latency forces all tty read handling to be done by the
+	 * read task.
+	 */
+	port->port.low_latency = 1;
+
+	if (!port->irq_registered) {
+		status = devm_request_irq(port->dev, port->tx_irq,
+				vtty_tx_irq, 0, dev_name(port->dev), port);
+		if (status)
+			return status;
+
+		status = devm_request_irq(port->dev, port->rx_irq,
+				vtty_rx_irq, 0, dev_name(port->dev), port);
+		if (status) {
+			devm_free_irq(port->dev, port->tx_irq, port);
+			return status;
+		}
+
+		port->irq_registered = true;
+	}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
+	tty_port_install(&port->port, driver, tty);
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
+	tty->port = &port->port;
+	tty_standard_install(driver, tty);
+#else
+	tty->port = &port->port;
+	if (tty_init_termios(tty) != 0)
+		return -ENOMEM;
+
+	tty_driver_kref_get(driver);
+	tty->count++;
+	driver->ttys[tty->index] = tty;
+#endif
+
+	return 0;
+}
+
+static int
+vtty_open(struct tty_struct *tty, struct file *file)
+{
+	struct vtty_port *port = tty->driver_data;
+	okl4_pipe_control_t x = 0;
+
+	okl4_pipe_control_setdoop(&x, true);
+	okl4_pipe_control_setoperation(&x,
+		OKL4_PIPE_CONTROL_OP_SET_TX_READY);
+	_okl4_sys_pipe_control(port->pipe_tx_kcap, x);
+	okl4_pipe_control_setoperation(&x,
+		OKL4_PIPE_CONTROL_OP_SET_RX_READY);
+	_okl4_sys_pipe_control(port->pipe_rx_kcap, x);
+
+	return tty_port_open(&port->port, tty, file);
+}
+
+static void
+vtty_close(struct tty_struct *tty, struct file *file)
+{
+	struct vtty_port *port = tty->driver_data;
+
+	if (port)
+		tty_port_close(&port->port, tty, file);
+}
+
+static int
+vtty_activate(struct tty_port *port, struct tty_struct *tty)
+{
+	struct vtty_port *vtty_port = tty->driver_data;
+
+	/* Run the read task immediately to drain the channel */
+	queue_work(read_workqueue, &vtty_port->read_work);
+
+	return 0;
+}
+
+static void
+vtty_shutdown(struct tty_port *port)
+{
+	struct vtty_port *vtty_port =
+			container_of(port, struct vtty_port, port);
+
+	cancel_work_sync(&vtty_port->read_work);
+}
+
+static int
+do_vtty_write(struct tty_struct *tty, const unsigned char *buf, int count)
+{
+	struct vtty_port *port = tty->driver_data;
+	int retval = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&port->write_lock, flags);
+
+	/* If we have a whole message, try to send it */
+	if (port->write_buffered == 0 && count >= port->max_msg_size) {
+		if (count > port->max_msg_size)
+			count = port->max_msg_size;
+
+		memcpy(&port->write_buf[sizeof(u32)], buf, count);
+		retval = do_pipe_write(port, count);
+		count -= retval;
+	}
+
+	/* If nothing was sent yet, buffer the data */
+	if (!retval) {
+		/* Determine how much data will fit in the buffer */
+		if (count > port->max_msg_size - port->write_buffered)
+			count = port->max_msg_size - port->write_buffered;
+
+		/* Copy into the buffer if possible */
+		if (count) {
+			memcpy(&port->write_buf[sizeof(u32) +
+					port->write_buffered], buf, count);
+			port->write_buffered += count;
+			retval = count;
+		}
+
+		/* Flush the buffer if it is full */
+		if (port->write_buffered == port->max_msg_size) {
+			if (do_pipe_write(port, port->write_buffered) > 0)
+				port->write_buffered = 0;
+		}
+	}
+
+	spin_unlock_irqrestore(&port->write_lock, flags);
+
+	return retval;
+}
+
+static void
+vtty_flush_chars(struct tty_struct *tty)
+{
+	struct vtty_port *port = tty->driver_data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&port->write_lock, flags);
+
+	if (port->write_buffered && do_pipe_write(port,
+			port->write_buffered) > 0) {
+		port->write_buffered = 0;
+		tty_wakeup(tty);
+	}
+
+	spin_unlock_irqrestore(&port->write_lock, flags);
+}
+
+static int
+vtty_put_char(struct tty_struct *tty, unsigned char ch)
+{
+	return do_vtty_write(tty, &ch, 1);
+}
+
+static int
+vtty_write(struct tty_struct *tty, const unsigned char *buf, int count)
+{
+	int retval;
+
+	retval = do_vtty_write(tty, buf, count);
+	vtty_flush_chars(tty);
+
+	return retval;
+}
+
+static int
+vtty_write_room(struct tty_struct *tty)
+{
+	struct vtty_port *port = tty->driver_data;
+
+	/*
+	 * If the channel is full, we have to buffer writes locally. While
+	 * vtty_write() can handle that, we may as well tell the ldisc to wait
+	 * for the channel to drain, so we return 0 here.
+	 */
+	return port->write_full ? 0 : port->max_msg_size - port->write_buffered;
+}
+
+static int
+vtty_chars_in_buffer(struct tty_struct *tty)
+{
+	struct vtty_port *port = tty->driver_data;
+
+	return port->max_msg_size - vtty_write_room(tty);
+}
+
+static void
+vtty_throttle(struct tty_struct *tty)
+{
+	struct vtty_port *port = tty->driver_data;
+
+	port->read_throttled = true;
+}
+
+static void
+vtty_unthrottle(struct tty_struct *tty)
+{
+	struct vtty_port *port = tty->driver_data;
+
+	port->read_throttled = false;
+	queue_work(read_workqueue, &port->read_work);
+}
+
+static const struct tty_port_operations vtty_port_ops = {
+	.activate = vtty_activate,
+	.shutdown = vtty_shutdown,
+};
+
+static int vtty_proc_show(struct seq_file *m, void *v)
+{
+	int i;
+
+	seq_puts(m, "okl4vttyinfo:1.0 driver:1.0\n");
+	for (i = 0; i < ARRAY_SIZE(ports); i++) {
+		struct vtty_port *port = &ports[i];
+
+		if (!port->exists)
+			continue;
+		seq_printf(m, "%d: tx_kcap: %d tx_irq: %d rx_kcap: %d rx_irq: %d\n",
+				i, port->pipe_tx_kcap, port->tx_irq,
+				port->pipe_rx_kcap, port->rx_irq);
+	}
+
+	return 0;
+}
+
+static const struct tty_operations vtty_ops = {
+	.install = vtty_install,
+	.open = vtty_open,
+	.close = vtty_close,
+	.write = vtty_write,
+	.put_char = vtty_put_char,
+	.flush_chars = vtty_flush_chars,
+	.write_room = vtty_write_room,
+	.chars_in_buffer = vtty_chars_in_buffer,
+	.throttle = vtty_throttle,
+	.unthrottle = vtty_unthrottle,
+	.proc_show = vtty_proc_show,
+};
+
+static void
+vtty_read_task(struct work_struct *work)
+{
+	struct vtty_port *port = container_of(work, struct vtty_port,
+			read_work);
+	struct tty_struct *tty = tty_port_tty_get(&port->port);
+	bool pushed = false;
+
+	if (!tty)
+		return;
+
+	while (true) {
+		struct _okl4_sys_pipe_recv_return ret_recv;
+		int space, len;
+
+		/* Stop reading if we are throttled. */
+		if (port->read_throttled)
+			break;
+
+		/* Find out how much space we have in the tty buffer. */
+		space = tty_buffer_request_room(&port->port,
+				port->max_msg_size);
+
+		if (space == 0) {
+			BUG_ON(pushed);
+			tty_flip_buffer_push(&port->port);
+			pushed = true;
+			continue;
+		} else {
+			pushed = false;
+		}
+
+		if (port->read_buf_pos == port->read_buf_len) {
+			/*
+			 * We have run out of chars in our message buffer.
+			 * Check whether there are any more messages in the
+			 * queue.
+			 */
+
+			ret_recv = _okl4_sys_pipe_recv(port->pipe_rx_kcap,
+					port->max_msg_size + sizeof(u32),
+					(void *)port->read_buf);
+			if (ret_recv.error == OKL4_ERROR_PIPE_NOT_READY) {
+				okl4_pipe_control_t x = 0;
+
+				okl4_pipe_control_setdoop(&x, true);
+				okl4_pipe_control_setoperation(&x,
+					OKL4_PIPE_CONTROL_OP_SET_RX_READY);
+				_okl4_sys_pipe_control(port->pipe_rx_kcap, x);
+
+				ret_recv = _okl4_sys_pipe_recv(port->pipe_rx_kcap,
+						port->max_msg_size + sizeof(u32),
+						(void *)port->read_buf);
+			}
+			if (ret_recv.error == OKL4_ERROR_PIPE_EMPTY ||
+					ret_recv.error == OKL4_ERROR_PIPE_NOT_READY) {
+				port->read_buf_pos = 0;
+				port->read_buf_len = 0;
+				break;
+			}
+
+			if (ret_recv.error != OKL4_OK) {
+				dev_err(port->dev,
+					"pipe receive returned error %d in vtty driver !\n",
+					(int)ret_recv.error);
+				port->read_buf_pos = 0;
+				port->read_buf_len = 0;
+				break;
+			}
+
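+			/* The leading u32 of the message holds its payload length. */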
+			port->read_buf_pos = sizeof(uint32_t);
+			port->read_buf_len = sizeof(uint32_t) +
+					*(uint32_t *)port->read_buf;
+		}
+
+		/* Send chars to tty layer. */
+		len = port->read_buf_len - port->read_buf_pos;
+		if (len > space)
+			len = space;
+
+		tty_insert_flip_string(&port->port, port->read_buf +
+				port->read_buf_pos, len);
+		port->read_buf_pos += len;
+	}
+
+	tty_flip_buffer_push(&port->port);
+
+	tty_kref_put(tty);
+}
+
+static struct tty_driver *vtty_driver;
+
+#ifdef CONFIG_OKL4_VTTY_CONSOLE
+static int vconsole_setup(struct console *co, char *options);
+static void vconsole_write(struct console *co, const char *p, unsigned count);
+static struct tty_driver *vconsole_device(struct console *co, int *index);
+#endif
+
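+/*
+ * Probe one "okl,pipe-tty" platform device: the two reg cells hold the TX
+ * and RX pipe kcaps, interrupts 0 and 1 are the TX and RX virqs, and a
+ * "vserial" (or "serial") alias selects the tty index.
+ */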
+static int
+vtty_probe(struct platform_device *pdev)
+{
+	struct vtty_port *vtty_port;
+	struct device *tty_dev;
+	u32 reg[2];
+	int vtty_id, irq, err;
+
+	vtty_id = of_alias_get_id(pdev->dev.of_node, "vserial");
+	if (vtty_id < 0)
+		vtty_id = of_alias_get_id(pdev->dev.of_node, "serial");
+
+	if (vtty_id < 0 || vtty_id >= MAX_VTTYS) {
+		err = -ENXIO;
+		goto fail_vtty_id;
+	}
+
+	vtty_port = &ports[vtty_id];
+	if (vtty_port->exists) {
+		dev_err(&pdev->dev, "vtty port already exists\n");
+		err = -ENODEV;
+		goto fail_vtty_id;
+	}
+
+	if (of_property_read_u32_array(pdev->dev.of_node, "reg", reg, 2)) {
+		dev_err(&pdev->dev, "need 2 reg resources\n");
+		err = -ENODEV;
+		goto fail_vtty_id;
+	}
+
+	dev_set_drvdata(&pdev->dev, vtty_port);
+
+	/* Set up and register the tty port */
+	vtty_port->dev = &pdev->dev;
+	vtty_port->vtty_id = vtty_id;
+	tty_port_init(&vtty_port->port);
+	vtty_port->port.ops = &vtty_port_ops;
+
+	vtty_port->pipe_tx_kcap = reg[0];
+	vtty_port->pipe_rx_kcap = reg[1];
+	vtty_port->max_msg_size = MAX_MSG_SIZE;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "no tx irq resource?\n");
+		err = -ENODEV;
+		goto fail_of;
+	}
+	vtty_port->tx_irq = irq;
+
+	irq = platform_get_irq(pdev, 1);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "no rx irq resource?\n");
+		err = -ENODEV;
+		goto fail_of;
+	}
+	vtty_port->rx_irq = irq;
+
+	vtty_port->exists = true;
+
+	spin_lock_init(&vtty_port->write_lock);
+	INIT_WORK(&vtty_port->read_work, vtty_read_task);
+
+	vtty_port->read_buf = kmalloc(vtty_port->max_msg_size + sizeof(u32),
+		GFP_KERNEL);
+	if (!vtty_port->read_buf) {
+		dev_err(&pdev->dev, "%s: bad kmalloc\n", __func__);
+		err = -ENOMEM;
+		goto fail_malloc_read;
+	}
+	vtty_port->read_buf_pos = 0;
+	vtty_port->read_buf_len = 0;
+
+	vtty_port->write_buf = kmalloc(vtty_port->max_msg_size + sizeof(u32),
+		GFP_KERNEL);
+	if (!vtty_port->write_buf) {
+		dev_err(&pdev->dev, "%s: bad kmalloc\n", __func__);
+		err = -ENOMEM;
+		goto fail_malloc_write;
+	}
+
+	tty_dev = tty_register_device(vtty_driver, vtty_id, &pdev->dev);
+	if (IS_ERR(tty_dev)) {
+		dev_err(&pdev->dev, "%s: can't register "DEVICE_NAME"%d: %ld\n",
+			__func__, vtty_id, PTR_ERR(tty_dev));
+		err = PTR_ERR(tty_dev);
+		goto fail_tty_register;
+	}
+
+#ifdef CONFIG_OKL4_VTTY_CONSOLE
+	/* Set up and register the port's console device */
+	strlcpy(vtty_port->console.name, DEVICE_PREFIX,
+		sizeof(vtty_port->console.name));
+	vtty_port->console.write = vconsole_write;
+	vtty_port->console.flags = CON_PRINTBUFFER;
+	vtty_port->console.device = vconsole_device;
+	vtty_port->console.setup = vconsole_setup;
+	vtty_port->console.index = vtty_id;
+
+	register_console(&vtty_port->console);
+#endif
+
+	return 0;
+
+fail_tty_register:
+	kfree(vtty_port->write_buf);
+fail_malloc_write:
+	kfree(vtty_port->read_buf);
+	vtty_port->exists = false;
+fail_of:
+fail_vtty_id:
+fail_malloc_read:
+	dev_set_drvdata(&pdev->dev, NULL);
+	return err;
+}
+
+static int
+vtty_remove(struct platform_device *pdev)
+{
+	struct vtty_port *vtty_port = dev_get_drvdata(&pdev->dev);
+
+	if (!vtty_port->exists)
+		return -ENOENT;
+
+#ifdef CONFIG_OKL4_VTTY_CONSOLE
+	unregister_console(&vtty_port->console);
+#endif
+	tty_unregister_device(vtty_driver, vtty_port->vtty_id);
+	vtty_port->exists = false;
+	kfree(vtty_port->write_buf);
+	kfree(vtty_port->read_buf);
+
+	dev_set_drvdata(&pdev->dev, NULL);
+
+	return 0;
+}
+
+static const struct of_device_id vtty_match[] = {
+	{
+		.compatible = "okl,pipe-tty",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, vtty_match);
+
+static struct platform_driver driver = {
+	.driver = {
+		.name = DRIVER_NAME,
+		.of_match_table = vtty_match,
+	},
+	.probe		= vtty_probe,
+	.remove		= vtty_remove,
+};
+
+
+static int __init vtty_init(void)
+{
+	int err;
+
+	/* Allocate workqueue */
+	read_workqueue = create_workqueue("okl4vtty");
+	if (read_workqueue == NULL) {
+		err = -ENOMEM;
+		goto fail_create_workqueue;
+	}
+
+	/* Set up the tty driver. */
+	vtty_driver = alloc_tty_driver(MAX_VTTYS);
+	if (vtty_driver == NULL) {
+		err = -ENOMEM;
+		goto fail_alloc_tty_driver;
+	}
+
+	vtty_driver->owner = THIS_MODULE;
+	vtty_driver->driver_name = DRIVER_NAME;
+	vtty_driver->name = DEVICE_PREFIX;
+	vtty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+	vtty_driver->subtype = SERIAL_TYPE_NORMAL;
+	vtty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+	vtty_driver->init_termios = tty_std_termios;
+
+	/* These flags don't really matter; just use sensible defaults. */
+	vtty_driver->init_termios.c_cflag =
+			B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+	vtty_driver->init_termios.c_ispeed = 9600;
+	vtty_driver->init_termios.c_ospeed = 9600;
+
+	tty_set_operations(vtty_driver, &vtty_ops);
+
+	err = tty_register_driver(vtty_driver);
+	if (err)
+		goto fail_tty_driver_register;
+
+	err = platform_driver_register(&driver);
+	if (err)
+		goto fail_mv_driver_register;
+
+	return 0;
+
+fail_mv_driver_register:
+	tty_unregister_driver(vtty_driver);
+fail_tty_driver_register:
+	put_tty_driver(vtty_driver);
+	vtty_driver = NULL;
+fail_alloc_tty_driver:
+	destroy_workqueue(read_workqueue);
+	read_workqueue = NULL;
+fail_create_workqueue:
+	return err;
+}
+
+static void __exit vtty_exit(void)
+{
+	platform_driver_unregister(&driver);
+
+	tty_unregister_driver(vtty_driver);
+	put_tty_driver(vtty_driver);
+	vtty_driver = NULL;
+	destroy_workqueue(read_workqueue);
+	read_workqueue = NULL;
+}
+
+module_init(vtty_init);
+module_exit(vtty_exit);
+
+#ifdef CONFIG_OKL4_VTTY_CONSOLE
+
+static u32 cycle_limit = 0;
+
+static int
+vconsole_setup(struct console *co, char *options)
+{
+	struct vtty_port *port;
+
+	if (co->index < 0 || co->index >= MAX_VTTYS)
+		co->index = 0;
+
+	port = &ports[co->index];
+	if (!port->exists)
+		return -ENODEV;
+
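+	/*
+	 * Allow roughly 20ms of retrying on a full pipe before console
+	 * output is dropped.
+	 */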
+	cycle_limit = arch_timer_get_rate() * 20 / MSEC_PER_SEC;
+	if (cycle_limit == 0)
+		cycle_limit = -1;
+	return 0;
+}
+
+#ifdef CONFIG_OKL4_INTERLEAVED_PRIORITIES
+extern int vcpu_prio_normal;
+#endif
+
+static void
+vconsole_write(struct console *co, const char *p, unsigned count)
+{
+	struct vtty_port *port = &ports[co->index];
+	size_t bytes_remaining = count;
+	char buf[MAX_MSG_SIZE + sizeof(u32)];
+	cycles_t last_sent_start = get_cycles();
+	static int pipe_full = 0;
+
+	memset(buf, 0, sizeof(buf));
+
+	while (bytes_remaining > 0) {
+		unsigned to_send = min(port->max_msg_size, bytes_remaining);
+		unsigned send = roundup(to_send + sizeof(u32), sizeof(u32));
+		okl4_error_t ret;
+
+		*(u32 *)buf = to_send;
+		memcpy(&buf[sizeof(u32)], p, to_send);
+
+		ret = _okl4_sys_pipe_send(port->pipe_tx_kcap, send,
+				(void *)buf);
+
+		if (ret == OKL4_ERROR_PIPE_NOT_READY) {
+			okl4_pipe_control_t x = 0;
+
+			okl4_pipe_control_setdoop(&x, true);
+			okl4_pipe_control_setoperation(&x,
+					OKL4_PIPE_CONTROL_OP_SET_TX_READY);
+			_okl4_sys_pipe_control(port->pipe_tx_kcap, x);
+			continue;
+		}
+
+		if (ret == OKL4_ERROR_PIPE_FULL) {
+			cycles_t last_sent_cycles = get_cycles() -
+					last_sent_start;
+			if (last_sent_cycles > cycle_limit || pipe_full) {
+				pipe_full = 1;
+				return;
+			}
+#ifdef CONFIG_OKL4_INTERLEAVED_PRIORITIES
+			_okl4_sys_priority_waive(vcpu_prio_normal);
+#else
+			_okl4_sys_priority_waive(0);
+#endif
+			continue;
+		}
+
+		if (ret != OKL4_OK) {
+			/*
+			 * We cannot call printk here since that will end up
+			 * calling back here and make things worse. We just
+			 * have to return and hope that the problem corrects
+			 * itself.
+			 */
+			return;
+		}
+
+		p += to_send;
+		bytes_remaining -= to_send;
+		last_sent_start = get_cycles();
+		pipe_full = 0;
+	}
+}
+
+static struct tty_driver *
+vconsole_device(struct console *co, int *index)
+{
+	*index = co->index;
+	return vtty_driver;
+}
+
+#endif /* CONFIG_OKL4_VTTY_CONSOLE */
+
+MODULE_DESCRIPTION("OKL4 virtual TTY driver");
+MODULE_AUTHOR("Philip Derrin <philipd@ok-labs.com>");
diff --git a/drivers/vservices/Kconfig b/drivers/vservices/Kconfig
new file mode 100644
index 0000000..16b3bda
--- /dev/null
+++ b/drivers/vservices/Kconfig
@@ -0,0 +1,81 @@
+#
+# OKL4 Virtual Services framework
+#
+
+menuconfig VSERVICES_SUPPORT
+	tristate "OKL4 Virtual Services support"
+	default OKL4_GUEST || OKL4_VIRTUALISATION
+	select HOTPLUG
+	help
+	  This option adds core support for OKL4 Virtual Services. The Virtual
+	  Services framework is an inter-OS device/service sharing
+	  protocol which is supported on OKL4 Microvisor virtualization
+	  platforms. You will also need drivers from the following menu in
+	  order to make use of it.
+
+if VSERVICES_SUPPORT
+
+config VSERVICES_CHAR_DEV
+	bool "Virtual Services user-space service API"
+	default y
+	help
+	  Select this if you want to use user-space service drivers. You will
+	  also need udev rules that create device nodes, and protocol code
+	  generated by the OK Mill tool.
+
+config VSERVICES_DEBUG
+	bool "Virtual Services debugging support"
+	help
+	  Select this if you want to enable Virtual Services core framework
+	  debugging. The debug messages for various components of the Virtual
+	  Services core framework can be toggled at runtime on a per-session
+	  basis via sysfs. When Virtual Services debugging is enabled here
+	  but disabled at runtime, it has a minimal performance impact.
+
+config VSERVICES_LOCK_DEBUG
+	bool "Debug Virtual Services state locks"
+	default DEBUG_KERNEL
+	help
+	  This option enables some runtime checks that Virtual Services
+	  state lock functions are used correctly in service drivers.
+
+config VSERVICES_SERVER
+	tristate "Virtual Services server support"
+	depends on SYSFS
+	default y
+	help
+	  This option adds support for Virtual Services servers, which allows
+	  exporting of services from this Linux to other environments. Servers
+	  are created at runtime by writing to files in
+	  /sys/bus/vservices-server.
+
+config VSERVICES_CLIENT
+	tristate "Virtual Services client support"
+	default y
+	help
+	  This option adds support for Virtual Services clients, which allows
+	  connecting to services exported from other environments.
+
+config VSERVICES_SKELETON_DRIVER
+	tristate "Virtual Services skeleton driver"
+	depends on VSERVICES_SERVER || VSERVICES_CLIENT
+	default n
+	help
+	  This option adds support for a skeleton virtual service driver. This
+	  driver can be used for templating or testing of virtual service
+	  drivers. If unsure say N.
+
+config VSERVICES_NAMED_DEVICE
+	bool "Virtual Services use named device node in /dev"
+	default n
+	help
+	  Select this if you want the device node in /dev to use a symbolic
+	  name rather than a numeric one.
+
+source "drivers/vservices/transport/Kconfig"
+
+source "drivers/vservices/protocol/Kconfig"
+
+source "drivers/vservices/Kconfig.stacks"
+
+endif # VSERVICES_SUPPORT
diff --git a/drivers/vservices/Kconfig.stacks b/drivers/vservices/Kconfig.stacks
new file mode 100644
index 0000000..97eba53
--- /dev/null
+++ b/drivers/vservices/Kconfig.stacks
@@ -0,0 +1,7 @@
+#
+# vServices drivers configuration
+#
+
+menu "Client and Server drivers"
+
+endmenu
diff --git a/drivers/vservices/Makefile b/drivers/vservices/Makefile
new file mode 100644
index 0000000..685ba0a
--- /dev/null
+++ b/drivers/vservices/Makefile
@@ -0,0 +1,16 @@
+ccflags-y += -Werror
+ccflags-$(CONFIG_VSERVICES_DEBUG) += -DDEBUG
+
+obj-$(CONFIG_VSERVICES_SUPPORT)	+= vservices.o
+vservices-objs-$(CONFIG_VSERVICES_CHAR_DEV) += devio.o
+vservices-objs = session.o $(vservices-objs-y)
+
+obj-$(CONFIG_VSERVICES_CLIENT) += core_client.o
+obj-$(CONFIG_VSERVICES_SERVER) += core_server.o
+
+obj-$(CONFIG_VSERVICES_SKELETON_DRIVER) += vservices_skeleton_driver.o
+vservices_skeleton_driver-objs = skeleton_driver.o
+
+obj-$(CONFIG_VSERVICES_SUPPORT) += transport/
+
+obj-$(CONFIG_VSERVICES_SUPPORT) += protocol/
diff --git a/drivers/vservices/compat.h b/drivers/vservices/compat.h
new file mode 100644
index 0000000..5f6926d
--- /dev/null
+++ b/drivers/vservices/compat.h
@@ -0,0 +1,59 @@
+/*
+ * drivers/vservices/compat.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Wrapper functions/definitions for compatibility between different kernel
+ * versions.
+ */
+
+#ifndef _VSERVICES_COMPAT_H
+#define _VSERVICES_COMPAT_H
+
+#include <linux/workqueue.h>
+#include <linux/version.h>
+
+/* The INIT_WORK_ONSTACK macro has a slightly different name in older kernels */
+#ifndef INIT_WORK_ONSTACK
+#define INIT_WORK_ONSTACK(_work, _func) INIT_WORK_ON_STACK(_work, _func)
+#endif
+
+/*
+ * We require a workqueue with no concurrency. This is provided by
+ * create_singlethread_workqueue() in kernels prior to 2.6.36.
+ * In later versions, create_singlethread_workqueue() enables WQ_MEM_RECLAIM and
+ * thus WQ_RESCUER, which allows work items to be grabbed by a rescuer thread
+ * and run concurrently if the queue is running too slowly. We must use
+ * alloc_ordered_workqueue() instead, to disable the rescuer.
+ */
+static inline struct workqueue_struct *
+vs_create_workqueue(const char *name)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
+	return create_singlethread_workqueue(name);
+#else
+	return alloc_ordered_workqueue(name, 0);
+#endif
+}
+
+/*
+ * The max3 macro has only been present from 2.6.37
+ * (commit: f27c85c56b32c42bcc54a43189c1e00fdceb23ec)
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 37)
+#define max3(x, y, z) ({			\
+	typeof(x) _max1 = (x);			\
+	typeof(y) _max2 = (y);			\
+	typeof(z) _max3 = (z);			\
+	(void) (&_max1 == &_max2);		\
+	(void) (&_max1 == &_max3);		\
+	_max1 > _max2 ? (_max1 > _max3 ? _max1 : _max3) : \
+		(_max2 > _max3 ? _max2 : _max3); })
+#endif
+
+#endif /* _VSERVICES_COMPAT_H */
diff --git a/drivers/vservices/core_client.c b/drivers/vservices/core_client.c
new file mode 100644
index 0000000..9ac65d2
--- /dev/null
+++ b/drivers/vservices/core_client.c
@@ -0,0 +1,735 @@
+/*
+ * drivers/vservices/core_client.c
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Client side core service application driver. This is responsible for:
+ *
+ *  - automatically connecting to the server when it becomes ready;
+ *  - sending a reset command to the server if something has gone wrong; and
+ *  - enumerating all the available services.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/err.h>
+#include <linux/module.h>
+
+#include <vservices/types.h>
+#include <vservices/transport.h>
+#include <vservices/session.h>
+#include <vservices/buffer.h>
+#include <vservices/service.h>
+
+#include <vservices/protocol/core/types.h>
+#include <vservices/protocol/core/common.h>
+#include <vservices/protocol/core/client.h>
+
+#include "session.h"
+#include "transport.h"
+#include "compat.h"
+
+struct core_client {
+	struct vs_client_core_state	state;
+	struct vs_service_device	*service;
+
+	struct list_head		message_queue;
+	struct mutex			message_queue_lock;
+	struct work_struct		message_queue_work;
+};
+
+struct pending_reset {
+	struct vs_service_device	*service;
+	struct list_head		list;
+};
+
+#define to_core_client(x)	container_of(x, struct core_client, state)
+#define dev_to_core_client(x)	to_core_client(dev_get_drvdata(x))
+
+static int vs_client_core_fatal_error(struct vs_client_core_state *state)
+{
+	struct core_client *client = to_core_client(state);
+
+	/* Force a transport level reset */
+	dev_err(&client->service->dev, "Fatal error - resetting session\n");
+	return -EPROTO;
+}
+
+static struct core_client *
+vs_client_session_core_client(struct vs_session_device *session)
+{
+	struct vs_service_device *core_service = session->core_service;
+
+	if (!core_service)
+		return NULL;
+
+	return dev_to_core_client(&core_service->dev);
+}
+
+static ssize_t client_core_reset_service_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct vs_service_device *core_service = to_vs_service_device(dev);
+	struct vs_session_device *session =
+		vs_service_get_session(core_service);
+	struct vs_service_device *target;
+	vs_service_id_t service_id;
+	unsigned long val;
+	int err;
+
+	/* Writing a valid service id to this file resets that service */
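+	/* e.g. "echo 3 > reset_service" in the core service's sysfs dir. */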
+	err = kstrtoul(buf, 0, &val);
+	if (err)
+		return err;
+
+	service_id = val;
+	target = vs_session_get_service(session, service_id);
+	if (!target)
+		return -ENODEV;
+
+	err = vs_service_reset(target, core_service);
+
+	vs_put_service(target);
+	return err < 0 ? err : count;
+}
+
+static DEVICE_ATTR(reset_service, S_IWUSR, NULL,
+		client_core_reset_service_store);
+
+static struct attribute *client_core_dev_attrs[] = {
+	&dev_attr_reset_service.attr,
+	NULL,
+};
+
+static const struct attribute_group client_core_attr_group = {
+	.attrs = client_core_dev_attrs,
+};
+
+/*
+ * Protocol callbacks
+ */
+static int
+vs_client_core_handle_service_removed(struct vs_client_core_state *state,
+		u32 service_id)
+{
+	struct core_client *client = to_core_client(state);
+	struct vs_session_device *session =
+			vs_service_get_session(client->service);
+	struct vs_service_device *service;
+	int ret;
+
+	service = vs_session_get_service(session, service_id);
+	if (!service)
+		return -EINVAL;
+
+	ret = vs_service_handle_delete(service);
+	vs_put_service(service);
+	return ret;
+}
+
+static int vs_client_core_create_service(struct core_client *client,
+		struct vs_session_device *session, vs_service_id_t service_id,
+		struct vs_string *protocol_name_string,
+		struct vs_string *service_name_string)
+{
+	char *protocol_name, *service_name;
+	struct vs_service_device *service;
+	int ret = 0;
+
+	protocol_name = vs_string_dup(protocol_name_string, GFP_KERNEL);
+	if (!protocol_name) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	service_name = vs_string_dup(service_name_string, GFP_KERNEL);
+	if (!service_name) {
+		ret = -ENOMEM;
+		goto out_free_protocol_name;
+	}
+
+	service = vs_service_register(session, client->service, service_id,
+			protocol_name, service_name, NULL);
+	if (IS_ERR(service)) {
+		ret = PTR_ERR(service);
+		goto out_free_service_name;
+	}
+
+	vs_service_start(service);
+
+out_free_service_name:
+	kfree(service_name);
+out_free_protocol_name:
+	kfree(protocol_name);
+out:
+	return ret;
+}
+
+static int
+vs_client_core_handle_service_created(struct vs_client_core_state *state,
+		u32 service_id, struct vs_string service_name,
+		struct vs_string protocol_name, struct vs_mbuf *mbuf)
+{
+	struct core_client *client = to_core_client(state);
+	struct vs_session_device *session =
+			vs_service_get_session(client->service);
+	int err;
+
+	vs_dev_debug(VS_DEBUG_CLIENT_CORE,
+			vs_service_get_session(client->service),
+			&client->service->dev, "Service info for %d received\n",
+			service_id);
+
+	err = vs_client_core_create_service(client, session, service_id,
+			&protocol_name, &service_name);
+	if (err)
+		dev_err(&session->dev,
+				"Failed to create service with id %d: %d\n",
+				service_id, err);
+
+	vs_client_core_core_free_service_created(state, &service_name,
+			&protocol_name, mbuf);
+
+	return err;
+}
+
+static int
+vs_client_core_send_service_reset(struct core_client *client,
+		struct vs_service_device *service)
+{
+	return vs_client_core_core_send_service_reset(&client->state,
+			service->id, GFP_KERNEL);
+}
+
+static int
+vs_client_core_queue_service_reset(struct vs_session_device *session,
+		struct vs_service_device *service)
+{
+	struct core_client *client =
+		vs_client_session_core_client(session);
+	struct pending_reset *msg;
+
+	if (!client)
+		return -ENODEV;
+
+	vs_dev_debug(VS_DEBUG_SERVER, session, &session->dev,
+			"Sending reset for service %d\n", service->id);
+
+	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
+	mutex_lock(&client->message_queue_lock);
+
+	/* put by message_queue_work */
+	msg->service = vs_get_service(service);
+	list_add_tail(&msg->list, &client->message_queue);
+
+	mutex_unlock(&client->message_queue_lock);
+	queue_work(client->service->work_queue, &client->message_queue_work);
+
+	return 0;
+}
+
+static int vs_core_client_tx_ready(struct vs_client_core_state *state)
+{
+	struct core_client *client = to_core_client(state);
+
+	queue_work(client->service->work_queue, &client->message_queue_work);
+
+	return 0;
+}
+
+static void message_queue_work(struct work_struct *work)
+{
+	struct core_client *client = container_of(work, struct core_client,
+			message_queue_work);
+	struct vs_session_device *session =
+		vs_service_get_session(client->service);
+	struct pending_reset *msg;
+	int err;
+
+	vs_service_state_lock(client->service);
+	if (!VSERVICE_CORE_STATE_IS_CONNECTED(client->state.state.core)) {
+		vs_service_state_unlock(client->service);
+		return;
+	}
+
+	vs_dev_debug(VS_DEBUG_CLIENT, session, &session->dev, "tx_ready\n");
+
+	mutex_lock(&client->message_queue_lock);
+	while (!list_empty(&client->message_queue)) {
+		msg = list_first_entry(&client->message_queue,
+				struct pending_reset, list);
+
+		err = vs_client_core_send_service_reset(client, msg->service);
+
+		/* If we're out of quota there's no point continuing */
+		if (err == -ENOBUFS)
+			break;
+
+		/* Any other error is fatal */
+		if (err < 0) {
+			dev_err(&client->service->dev,
+					"Failed to send pending reset for %d (%d) - resetting session\n",
+					msg->service->id, err);
+			vs_service_reset_nosync(client->service);
+			break;
+		}
+
+		/*
+		 * The message sent successfully - remove it from the queue.
+		 * The corresponding vs_get_service() was done when the pending
+		 * message was enqueued.
+		 */
+		vs_put_service(msg->service);
+		list_del(&msg->list);
+		kfree(msg);
+	}
+	mutex_unlock(&client->message_queue_lock);
+	vs_service_state_unlock(client->service);
+}
+
+static int
+vs_client_core_handle_server_ready(struct vs_client_core_state *state,
+		u32 service_id, u32 in_quota, u32 out_quota, u32 in_bit_offset,
+		u32 in_num_bits, u32 out_bit_offset, u32 out_num_bits)
+{
+	struct core_client *client = to_core_client(state);
+	struct vs_session_device *session;
+	struct vs_service_device *service;
+	int ret;
+
+	if (service_id == 0)
+		return -EPROTO;
+
+	if (!in_quota || !out_quota)
+		return -EINVAL;
+
+	session = vs_service_get_session(client->service);
+	service = vs_session_get_service(session, service_id);
+	if (!service)
+		return -EINVAL;
+
+	service->send_quota = in_quota;
+	service->recv_quota = out_quota;
+	service->notify_send_offset = in_bit_offset;
+	service->notify_send_bits = in_num_bits;
+	service->notify_recv_offset = out_bit_offset;
+	service->notify_recv_bits = out_num_bits;
+
+	ret = vs_service_enable(service);
+	vs_put_service(service);
+	return ret;
+}
+
+static int
+vs_client_core_handle_service_reset(struct vs_client_core_state *state,
+		u32 service_id)
+{
+	struct core_client *client = to_core_client(state);
+	struct vs_session_device *session;
+
+	if (service_id == 0)
+		return -EPROTO;
+
+	session = vs_service_get_session(client->service);
+
+	return vs_service_handle_reset(session, service_id, true);
+}
+
+static void vs_core_client_start(struct vs_client_core_state *state)
+{
+	struct core_client *client = to_core_client(state);
+	struct vs_session_device *session =
+			vs_service_get_session(client->service);
+
+	/* FIXME - start callback should return int */
+	vs_dev_debug(VS_DEBUG_CLIENT_CORE, session, &client->service->dev,
+			"Core client start\n");
+}
+
+static void vs_core_client_reset(struct vs_client_core_state *state)
+{
+	struct core_client *client = to_core_client(state);
+	struct vs_session_device *session =
+		vs_service_get_session(client->service);
+	struct pending_reset *msg;
+
+	/* Flush the pending resets - we're about to delete everything */
+	while (!list_empty(&client->message_queue)) {
+		msg = list_first_entry(&client->message_queue,
+				struct pending_reset, list);
+		vs_put_service(msg->service);
+		list_del(&msg->list);
+		kfree(msg);
+	}
+
+	vs_session_delete_noncore(session);
+
+	/* Return to the initial quotas, until the next startup message */
+	client->service->send_quota = 0;
+	client->service->recv_quota = 1;
+}
+
+static int vs_core_client_startup(struct vs_client_core_state *state,
+		u32 core_in_quota, u32 core_out_quota)
+{
+	struct core_client *client = to_core_client(state);
+	struct vs_service_device *service = state->service;
+	struct vs_session_device *session = vs_service_get_session(service);
+	int ret;
+
+	if (!core_in_quota || !core_out_quota)
+		return -EINVAL;
+
+	/*
+	 * Update the service struct with our real quotas and tell the
+	 * transport about the change
+	 */
+
+	service->send_quota = core_in_quota;
+	service->recv_quota = core_out_quota;
+	ret = session->transport->vt->service_start(session->transport, service);
+	if (ret < 0)
+		return ret;
+
+	WARN_ON(!list_empty(&client->message_queue));
+
+	return vs_client_core_core_req_connect(state, GFP_KERNEL);
+}
+
+static struct vs_client_core_state *
+vs_core_client_alloc(struct vs_service_device *service)
+{
+	struct core_client *client;
+	int err;
+
+	client = kzalloc(sizeof(*client), GFP_KERNEL);
+	if (!client)
+		goto fail;
+
+	client->service = service;
+	INIT_LIST_HEAD(&client->message_queue);
+	INIT_WORK(&client->message_queue_work, message_queue_work);
+	mutex_init(&client->message_queue_lock);
+
+	err = sysfs_create_group(&service->dev.kobj, &client_core_attr_group);
+	if (err)
+		goto fail_free_client;
+
+	/*
+	 * Default transport resources for the core service client. The
+	 * server will inform us of the real quotas in the startup message.
+	 * Note that it is important that the quotas never decrease, so these
+	 * numbers are as small as possible.
+	 */
+	service->send_quota = 0;
+	service->recv_quota = 1;
+	service->notify_send_bits = 0;
+	service->notify_send_offset = 0;
+	service->notify_recv_bits = 0;
+	service->notify_recv_offset = 0;
+
+	return &client->state;
+
+fail_free_client:
+	kfree(client);
+fail:
+	return NULL;
+}
+
+static void vs_core_client_release(struct vs_client_core_state *state)
+{
+	struct core_client *client = to_core_client(state);
+
+	sysfs_remove_group(&client->service->dev.kobj, &client_core_attr_group);
+	kfree(client);
+}
+
+static struct vs_client_core vs_core_client_driver = {
+	.alloc		= vs_core_client_alloc,
+	.release	= vs_core_client_release,
+	.start		= vs_core_client_start,
+	.reset		= vs_core_client_reset,
+	.tx_ready	= vs_core_client_tx_ready,
+
+	.core = {
+		.nack_connect		= vs_client_core_fatal_error,
+
+		/* FIXME: Jira ticket SDK-3074 - ryanm. */
+		.ack_disconnect		= vs_client_core_fatal_error,
+		.nack_disconnect	= vs_client_core_fatal_error,
+
+		.msg_service_created	= vs_client_core_handle_service_created,
+		.msg_service_removed	= vs_client_core_handle_service_removed,
+
+		.msg_startup		= vs_core_client_startup,
+		/* FIXME: Jira ticket SDK-3074 - philipd. */
+		.msg_shutdown		= vs_client_core_fatal_error,
+		.msg_server_ready	= vs_client_core_handle_server_ready,
+		.msg_service_reset	= vs_client_core_handle_service_reset,
+	},
+};
+
+/*
+ * Client bus driver
+ */
+static int vs_client_bus_match(struct device *dev, struct device_driver *driver)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+	struct vs_service_driver *vsdrv = to_vs_service_driver(driver);
+
+	/* Don't match anything to the devio driver; it's bound manually */
+	if (!vsdrv->protocol)
+		return 0;
+
+	WARN_ON_ONCE(service->is_server || vsdrv->is_server);
+
+	/* Match if the protocol strings are the same */
+	if (strcmp(service->protocol, vsdrv->protocol) == 0)
+		return 1;
+
+	return 0;
+}
+
+static ssize_t is_server_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", service->is_server);
+}
+
+static DEVICE_ATTR_RO(is_server);
+
+static ssize_t id_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", service->id);
+}
+
+static DEVICE_ATTR_RO(id);
+
+static ssize_t dev_protocol_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n", service->protocol ?: "");
+}
+
+static DEVICE_ATTR(protocol, 0444, dev_protocol_show, NULL);
+
+static ssize_t service_name_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n", service->name);
+}
+
+static DEVICE_ATTR_RO(service_name);
+
+static ssize_t quota_in_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", service->send_quota);
+}
+
+static DEVICE_ATTR_RO(quota_in);
+
+static ssize_t quota_out_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", service->recv_quota);
+}
+
+static DEVICE_ATTR_RO(quota_out);
+
+static struct attribute *vs_client_dev_attrs[] = {
+	&dev_attr_id.attr,
+	&dev_attr_is_server.attr,
+	&dev_attr_protocol.attr,
+	&dev_attr_service_name.attr,
+	&dev_attr_quota_in.attr,
+	&dev_attr_quota_out.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(vs_client_dev);
+
+static ssize_t protocol_show(struct device_driver *drv, char *buf)
+{
+	struct vs_service_driver *driver = to_vs_service_driver(drv);
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n", driver->protocol);
+}
+
+static DRIVER_ATTR_RO(protocol);
+
+static struct attribute *vs_client_drv_attrs[] = {
+	&driver_attr_protocol.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(vs_client_drv);
+
+struct bus_type vs_client_bus_type = {
+	.name		= "vservices-client",
+	.dev_groups	= vs_client_dev_groups,
+	.drv_groups	= vs_client_drv_groups,
+	.match		= vs_client_bus_match,
+	.probe		= vs_service_bus_probe,
+	.remove		= vs_service_bus_remove,
+	.uevent		= vs_service_bus_uevent,
+};
+EXPORT_SYMBOL(vs_client_bus_type);
+
+/*
+ * Client session driver
+ */
+static int vs_client_session_probe(struct device *dev)
+{
+	struct vs_session_device *session = to_vs_session_device(dev);
+	struct vs_service_device *service;
+	char *protocol, *name;
+	int ret = 0;
+
+	if (session->is_server) {
+		ret = -ENODEV;
+		goto fail;
+	}
+
+	/* create a service for the core protocol client */
+	protocol = kstrdup(VSERVICE_CORE_PROTOCOL_NAME, GFP_KERNEL);
+	if (!protocol) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	name = kstrdup("core", GFP_KERNEL);
+	if (!name) {
+		ret = -ENOMEM;
+		goto fail_free_protocol;
+	}
+
+	service = vs_service_register(session, NULL, 0, protocol, name, NULL);
+	if (IS_ERR(service)) {
+		ret = PTR_ERR(service);
+		goto fail_free_name;
+	}
+
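+	/*
+	 * Both success and failure paths free the temporary name and
+	 * protocol strings below; on success, control simply falls
+	 * through to the cleanup labels.
+	 */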
+fail_free_name:
+	kfree(name);
+fail_free_protocol:
+	kfree(protocol);
+fail:
+	return ret;
+}
+
+static int
+vs_client_session_send_service_reset(struct vs_session_device *session,
+		struct vs_service_device *service)
+{
+	if (WARN_ON(service->id == 0))
+		return -EINVAL;
+
+	return vs_client_core_queue_service_reset(session, service);
+}
+
+static struct vs_session_driver vs_client_session_driver = {
+	.driver	= {
+		.name			= "vservices-client-session",
+		.owner			= THIS_MODULE,
+		.bus			= &vs_session_bus_type,
+		.probe			= vs_client_session_probe,
+		.suppress_bind_attrs	= true,
+	},
+	.is_server		= false,
+	.service_bus		= &vs_client_bus_type,
+	.service_local_reset	= vs_client_session_send_service_reset,
+};
+
+static int __init vs_core_client_init(void)
+{
+	int ret;
+
+	ret = bus_register(&vs_client_bus_type);
+	if (ret)
+		goto fail_bus_register;
+
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+	vs_devio_client_driver.driver.bus = &vs_client_bus_type;
+	vs_devio_client_driver.driver.owner = THIS_MODULE;
+	ret = driver_register(&vs_devio_client_driver.driver);
+	if (ret)
+		goto fail_devio_register;
+#endif
+
+	ret = driver_register(&vs_client_session_driver.driver);
+	if (ret)
+		goto fail_driver_register;
+
+	ret = vservice_core_client_register(&vs_core_client_driver,
+			"vs_core_client");
+	if (ret)
+		goto fail_core_register;
+
+	vservices_client_root = kobject_create_and_add("client-sessions",
+			vservices_root);
+	if (!vservices_client_root) {
+		ret = -ENOMEM;
+		goto fail_create_root;
+	}
+
+	return 0;
+
+fail_create_root:
+	vservice_core_client_unregister(&vs_core_client_driver);
+fail_core_register:
+	driver_unregister(&vs_client_session_driver.driver);
+fail_driver_register:
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+	driver_unregister(&vs_devio_client_driver.driver);
+	vs_devio_client_driver.driver.bus = NULL;
+	vs_devio_client_driver.driver.owner = NULL;
+fail_devio_register:
+#endif
+	bus_unregister(&vs_client_bus_type);
+fail_bus_register:
+	return ret;
+}
+
+static void __exit vs_core_client_exit(void)
+{
+	kobject_put(vservices_client_root);
+	vservice_core_client_unregister(&vs_core_client_driver);
+	driver_unregister(&vs_client_session_driver.driver);
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+	driver_unregister(&vs_devio_client_driver.driver);
+	vs_devio_client_driver.driver.bus = NULL;
+	vs_devio_client_driver.driver.owner = NULL;
+#endif
+	bus_unregister(&vs_client_bus_type);
+}
+
+subsys_initcall(vs_core_client_init);
+module_exit(vs_core_client_exit);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Core Client Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/core_server.c b/drivers/vservices/core_server.c
new file mode 100644
index 0000000..48d1d1b
--- /dev/null
+++ b/drivers/vservices/core_server.c
@@ -0,0 +1,1649 @@
+/*
+ * drivers/vservices/core_server.c
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Server side core service application driver
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/ctype.h>
+
+#include <vservices/types.h>
+#include <vservices/transport.h>
+#include <vservices/session.h>
+#include <vservices/buffer.h>
+#include <vservices/service.h>
+
+#include <vservices/protocol/core/types.h>
+#include <vservices/protocol/core/common.h>
+#include <vservices/protocol/core/server.h>
+
+#include "transport.h"
+#include "session.h"
+#include "compat.h"
+
+#define VSERVICE_CORE_SERVICE_NAME	"core"
+
+struct core_server {
+	struct vs_server_core_state	state;
+	struct vs_service_device	*service;
+
+	/*
+	 * A list of messages to send, a mutex protecting it, and a
+	 * work item to process the list.
+	 */
+	struct list_head		message_queue;
+	struct mutex			message_queue_lock;
+	struct work_struct		message_queue_work;
+
+	struct mutex			alloc_lock;
+
+	/* The following are all protected by alloc_lock. */
+	unsigned long			*in_notify_map;
+	int				in_notify_map_bits;
+
+	unsigned long			*out_notify_map;
+	int				out_notify_map_bits;
+
+	unsigned			in_quota_remaining;
+	unsigned			out_quota_remaining;
+};
+
+/*
+ * Used for message deferral when the core service is over quota.
+ */
+struct pending_message {
+	vservice_core_message_id_t		type;
+	struct vs_service_device		*service;
+	struct list_head			list;
+};
+
+#define to_core_server(x)	container_of(x, struct core_server, state)
+#define dev_to_core_server(x)	to_core_server(dev_get_drvdata(x))
+
+static struct vs_session_device *
+vs_core_server_session(struct core_server *server)
+{
+	return vs_service_get_session(server->service);
+}
+
+static struct core_server *
+vs_server_session_core_server(struct vs_session_device *session)
+{
+	struct vs_service_device *core_service = session->core_service;
+
+	if (!core_service)
+		return NULL;
+
+	return dev_to_core_server(&core_service->dev);
+}
+
+static int vs_server_core_send_service_removed(struct core_server *server,
+		struct vs_service_device *service)
+{
+	return vs_server_core_core_send_service_removed(&server->state,
+			service->id, GFP_KERNEL);
+}
+
+static bool
+cancel_pending_created(struct core_server *server,
+		struct vs_service_device *service)
+{
+	struct pending_message *msg;
+
+	list_for_each_entry(msg, &server->message_queue, list) {
+		if (msg->type == VSERVICE_CORE_CORE_MSG_SERVICE_CREATED &&
+				msg->service == service) {
+			vs_put_service(msg->service);
+			list_del(&msg->list);
+			kfree(msg);
+
+			/* there can only be one */
+			return true;
+		}
+	}
+
+	return false;
+}
+
+static int vs_server_core_queue_service_removed(struct core_server *server,
+		struct vs_service_device *service)
+{
+	struct pending_message *msg;
+
+	lockdep_assert_held(&service->ready_lock);
+
+	mutex_lock(&server->message_queue_lock);
+
+	/*
+	 * If we haven't sent the notification that the service was created,
+	 * nuke it and do nothing else.
+	 *
+	 * This is not just an optimisation; see below.
+	 */
+	if (cancel_pending_created(server, service)) {
+		mutex_unlock(&server->message_queue_lock);
+		return 0;
+	}
+
+	/*
+	 * Do nothing if the core state is not connected. We must avoid
+	 * queueing service_removed messages on a reset service.
+	 *
+	 * Note that we cannot take the core server state lock here, because
+	 * we may (or may not) have been called from a core service message
+	 * handler. Thus, we must beware of races with changes to this
+	 * condition:
+	 *
+	 * - It becomes true when the req_connect handler sends an
+	 *   ack_connect, *after* it queues service_created for each existing
+	 *   service (while holding the service ready lock). The handler sends
+	 *   ack_connect with the message queue lock held.
+	 *
+	 *   - If we see the service as connected, then the req_connect
+	 *     handler has already queued and sent a service_created for this
+	 *     service, so it's ok for us to send a service_removed.
+	 *
+	 *   - If we see it as disconnected, the req_connect handler hasn't
+	 *     taken the message queue lock to send ack_connect yet, and thus
+	 *     has not released the service state lock; so if it queued a
+	 *     service_created we caught it in the flush above before it was
+	 *     sent.
+	 *
+	 * - It becomes false before the reset / disconnect handlers are
+	 *   called and those will both flush the message queue afterwards.
+	 *
+	 *   - If we see the service as connected, then the reset / disconnect
+	 *     handler is going to flush the message.
+	 *
+	 *   - If we see it disconnected, the state change has occurred and
+	 *     implicitly had the same effect as this message, so doing
+	 *     nothing is correct.
+	 *
+	 * Note that ordering in all of the above cases is guaranteed by the
+	 * message queue lock.
+	 */
+	if (!VSERVICE_CORE_STATE_IS_CONNECTED(server->state.state.core)) {
+		mutex_unlock(&server->message_queue_lock);
+		return 0;
+	}
+
+	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+	if (!msg) {
+		mutex_unlock(&server->message_queue_lock);
+		return -ENOMEM;
+	}
+
+	msg->type = VSERVICE_CORE_CORE_MSG_SERVICE_REMOVED;
+	/* put by message_queue_work */
+	msg->service = vs_get_service(service);
+
+	list_add_tail(&msg->list, &server->message_queue);
+
+	mutex_unlock(&server->message_queue_lock);
+	queue_work(server->service->work_queue, &server->message_queue_work);
+
+	return 0;
+}
+
+static int vs_server_core_send_service_created(struct core_server *server,
+		struct vs_service_device *service)
+{
+	struct vs_session_device *session =
+			vs_service_get_session(server->service);
+
+	struct vs_mbuf *mbuf;
+	struct vs_string service_name, protocol_name;
+	size_t service_name_len, protocol_name_len;
+
+	int err;
+
+	mbuf = vs_server_core_core_alloc_service_created(&server->state,
+			&service_name, &protocol_name, GFP_KERNEL);
+
+	if (IS_ERR(mbuf))
+		return PTR_ERR(mbuf);
+
+	vs_dev_debug(VS_DEBUG_SERVER, session, &session->dev,
+			"Sending service created message for %d (%s:%s)\n",
+			service->id, service->name, service->protocol);
+
+	service_name_len = strlen(service->name);
+	protocol_name_len = strlen(service->protocol);
+
+	if (service_name_len > vs_string_max_size(&service_name) ||
+			protocol_name_len > vs_string_max_size(&protocol_name)) {
+		dev_err(&session->dev,
+				"Invalid name/protocol for service %d (%s:%s)\n",
+				service->id, service->name,
+				service->protocol);
+		err = -EINVAL;
+		goto fail;
+	}
+
+	vs_string_copyin(&service_name, service->name);
+	vs_string_copyin(&protocol_name, service->protocol);
+
+	err = vs_server_core_core_send_service_created(&server->state,
+			service->id, service_name, protocol_name, mbuf);
+	if (err) {
+		dev_err(&session->dev,
+				"Fatal error sending service creation message for %d (%s:%s): %d\n",
+				service->id, service->name,
+				service->protocol, err);
+		goto fail;
+	}
+
+	return 0;
+
+fail:
+	vs_server_core_core_free_service_created(&server->state,
+			&service_name, &protocol_name, mbuf);
+
+	return err;
+}
+
+static int vs_server_core_queue_service_created(struct core_server *server,
+		struct vs_service_device *service)
+{
+	struct pending_message *msg;
+
+	lockdep_assert_held(&service->ready_lock);
+	lockdep_assert_held(&server->service->state_mutex);
+
+	mutex_lock(&server->message_queue_lock);
+
+	/* Do nothing if the core state is disconnected. */
+	if (!VSERVICE_CORE_STATE_IS_CONNECTED(server->state.state.core)) {
+		mutex_unlock(&server->message_queue_lock);
+		return 0;
+	}
+
+	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+	if (!msg) {
+		mutex_unlock(&server->message_queue_lock);
+		return -ENOMEM;
+	}
+
+	msg->type = VSERVICE_CORE_CORE_MSG_SERVICE_CREATED;
+	/* put by message_queue_work */
+	msg->service = vs_get_service(service);
+
+	list_add_tail(&msg->list, &server->message_queue);
+
+	mutex_unlock(&server->message_queue_lock);
+	queue_work(server->service->work_queue, &server->message_queue_work);
+
+	return 0;
+}
+
+static struct vs_service_device *
+__vs_server_core_register_service(struct vs_session_device *session,
+		vs_service_id_t service_id, struct vs_service_device *owner,
+		const char *name, const char *protocol, const void *plat_data)
+{
+	if (!session->is_server)
+		return ERR_PTR(-ENODEV);
+
+	if (!name || strnlen(name, VSERVICE_CORE_SERVICE_NAME_SIZE + 1) >
+			VSERVICE_CORE_SERVICE_NAME_SIZE || name[0] == '\n')
+		return ERR_PTR(-EINVAL);
+
+	/* The server core must only be registered as service_id zero */
+	if (service_id == 0 && (owner != NULL ||
+			strcmp(name, VSERVICE_CORE_SERVICE_NAME) != 0 ||
+			strcmp(protocol, VSERVICE_CORE_PROTOCOL_NAME) != 0))
+		return ERR_PTR(-EINVAL);
+
+	return vs_service_register(session, owner, service_id, protocol, name,
+			plat_data);
+}
+
+static struct vs_service_device *
+vs_server_core_create_service(struct core_server *server,
+		struct vs_session_device *session,
+		struct vs_service_device *owner, vs_service_id_t service_id,
+		const char *name, const char *protocol, const void *plat_data)
+{
+	struct vs_service_device *service;
+
+	service = __vs_server_core_register_service(session, service_id,
+			owner, name, protocol, plat_data);
+	if (IS_ERR(service))
+		return service;
+
+	if (protocol) {
+		vs_service_state_lock(server->service);
+		vs_service_start(service);
+		if (VSERVICE_CORE_STATE_IS_CONNECTED(server->state.state.core))
+			vs_service_enable(service);
+		vs_service_state_unlock(server->service);
+	}
+
+	return service;
+}
+
+static int
+vs_server_core_send_service_reset_ready(struct core_server *server,
+		vservice_core_message_id_t type,
+		struct vs_service_device *service)
+{
+	bool is_reset = (type == VSERVICE_CORE_CORE_MSG_SERVICE_RESET);
+	struct vs_session_device *session __maybe_unused =
+			vs_service_get_session(server->service);
+	int err;
+
+	vs_dev_debug(VS_DEBUG_SERVER, session, &session->dev,
+			"Sending %s for service %d\n",
+			is_reset ? "reset" : "ready", service->id);
+
+	if (is_reset)
+		err = vs_server_core_core_send_service_reset(&server->state,
+				service->id, GFP_KERNEL);
+	else
+		err = vs_server_core_core_send_server_ready(&server->state,
+				service->id, service->recv_quota,
+				service->send_quota,
+				service->notify_recv_offset,
+				service->notify_recv_bits,
+				service->notify_send_offset,
+				service->notify_send_bits,
+				GFP_KERNEL);
+
+	return err;
+}
+
+static bool
+cancel_pending_ready(struct core_server *server,
+		struct vs_service_device *service)
+{
+	struct pending_message *msg;
+
+	list_for_each_entry(msg, &server->message_queue, list) {
+		if (msg->type == VSERVICE_CORE_CORE_MSG_SERVER_READY &&
+				msg->service == service) {
+			vs_put_service(msg->service);
+			list_del(&msg->list);
+			kfree(msg);
+
+			/* there can only be one */
+			return true;
+		}
+	}
+
+	return false;
+}
+
+static int
+vs_server_core_queue_service_reset_ready(struct core_server *server,
+		vservice_core_message_id_t type,
+		struct vs_service_device *service)
+{
+	bool is_reset = (type == VSERVICE_CORE_CORE_MSG_SERVICE_RESET);
+	struct pending_message *msg;
+
+	mutex_lock(&server->message_queue_lock);
+
+	/*
+	 * If this is a reset, and there is an outgoing ready in the
+	 * queue, we must cancel it so it can't be sent with invalid
+	 * transport resources, and then return immediately so we
+	 * don't send a redundant reset.
+	 */
+	if (is_reset && cancel_pending_ready(server, service)) {
+		mutex_unlock(&server->message_queue_lock);
+		return VS_SERVICE_ALREADY_RESET;
+	}
+
+	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+	if (!msg) {
+		mutex_unlock(&server->message_queue_lock);
+		return -ENOMEM;
+	}
+
+	msg->type = type;
+	/* put by message_queue_work */
+	msg->service = vs_get_service(service);
+	list_add_tail(&msg->list, &server->message_queue);
+
+	mutex_unlock(&server->message_queue_lock);
+	queue_work(server->service->work_queue, &server->message_queue_work);
+
+	return 0;
+}
+
+static int vs_core_server_tx_ready(struct vs_server_core_state *state)
+{
+	struct core_server *server = to_core_server(state);
+	struct vs_session_device *session __maybe_unused =
+			vs_service_get_session(server->service);
+
+	vs_dev_debug(VS_DEBUG_SERVER, session, &session->dev, "tx_ready\n");
+
+	queue_work(server->service->work_queue, &server->message_queue_work);
+
+	return 0;
+}
+
+static void message_queue_work(struct work_struct *work)
+{
+	struct core_server *server = container_of(work, struct core_server,
+			message_queue_work);
+	struct pending_message *msg;
+	int err;
+
+	vs_service_state_lock(server->service);
+
+	if (!VSERVICE_CORE_STATE_IS_CONNECTED(server->state.state.core)) {
+		vs_service_state_unlock(server->service);
+		return;
+	}
+
+	/*
+	 * If any pending message fails we exit the loop immediately so that
+	 * we preserve the message order.
+	 */
+	mutex_lock(&server->message_queue_lock);
+	while (!list_empty(&server->message_queue)) {
+		msg = list_first_entry(&server->message_queue,
+				struct pending_message, list);
+
+		switch (msg->type) {
+		case VSERVICE_CORE_CORE_MSG_SERVICE_CREATED:
+			err = vs_server_core_send_service_created(server,
+					msg->service);
+			break;
+
+		case VSERVICE_CORE_CORE_MSG_SERVICE_REMOVED:
+			err = vs_server_core_send_service_removed(server,
+					msg->service);
+			break;
+
+		case VSERVICE_CORE_CORE_MSG_SERVICE_RESET:
+		case VSERVICE_CORE_CORE_MSG_SERVER_READY:
+			err = vs_server_core_send_service_reset_ready(
+					server, msg->type, msg->service);
+			break;
+
+		default:
+			dev_warn(&server->service->dev,
+					"Don't know how to handle pending message type %d\n",
+					msg->type);
+			err = 0;
+			break;
+		}
+
+		/*
+		 * If we're out of quota we exit and wait for tx_ready to
+		 * queue us again.
+		 */
+		if (err == -ENOBUFS)
+			break;
+
+		/* Any other error is fatal */
+		if (err < 0) {
+			dev_err(&server->service->dev,
+					"Failed to send pending message type %d: %d - resetting session\n",
+					msg->type, err);
+			vs_service_reset_nosync(server->service);
+			break;
+		}
+
+		/*
+		 * The message sent successfully - remove it from the
+		 * queue. The corresponding vs_get_service() was done
+		 * when the pending message was created.
+		 */
+		vs_put_service(msg->service);
+		list_del(&msg->list);
+		kfree(msg);
+	}
+	mutex_unlock(&server->message_queue_lock);
+
+	vs_service_state_unlock(server->service);
+}
+
+/*
+ * Core server sysfs interface
+ */
+static ssize_t server_core_create_service_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+	struct vs_session_device *session = to_vs_session_device(dev->parent);
+	struct core_server *server = dev_to_core_server(&service->dev);
+	struct vs_service_device *new_service;
+	char *p;
+	ssize_t ret = count;
+
+	/* FIXME - Buffer sizes are not defined in generated headers */
+	/* discard leading whitespace */
+	while (count && isspace(*buf)) {
+		buf++;
+		count--;
+	}
+	if (!count) {
+		dev_info(dev, "empty service name\n");
+		return -EINVAL;
+	}
+	/* discard trailing whitespace */
+	while (count && isspace(buf[count - 1]))
+		count--;
+
+	if (count > VSERVICE_CORE_SERVICE_NAME_SIZE) {
+		dev_info(dev, "service name too long (max %d)\n",
+				VSERVICE_CORE_SERVICE_NAME_SIZE);
+		return -EINVAL;
+	}
+
+	p = kstrndup(buf, count, GFP_KERNEL);
+	if (!p)
+		return -ENOMEM;
+
+	/*
+	 * Writing a service name to this file creates a new service. The
+	 * service is created without a protocol. It will appear in sysfs
+	 * but will not be bound to a driver until a valid protocol name
+	 * has been written to the created devices protocol sysfs attribute.
+	 */
+	new_service = vs_server_core_create_service(server, session, service,
+			VS_SERVICE_AUTO_ALLOCATE_ID, p, NULL, NULL);
+	if (IS_ERR(new_service))
+		ret = PTR_ERR(new_service);
+
+	kfree(p);
+
+	return ret;
+}
+
+static ssize_t server_core_reset_service_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct vs_service_device *core_service = to_vs_service_device(dev);
+	struct vs_session_device *session =
+		vs_service_get_session(core_service);
+	struct vs_service_device *target;
+	vs_service_id_t service_id;
+	unsigned long val;
+	int err;
+
+	/*
+	 * Writing a valid service_id to this file does a reset of that service
+	 */
+	err = kstrtoul(buf, 0, &val);
+	if (err)
+		return err;
+
+	service_id = val;
+	target = vs_session_get_service(session, service_id);
+	if (!target)
+		return -EINVAL;
+
+	err = vs_service_reset(target, core_service);
+
+	vs_put_service(target);
+	return err < 0 ? err : count;
+}
+
+static ssize_t server_core_remove_service_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+	struct vs_session_device *session = vs_service_get_session(service);
+	struct vs_service_device *target;
+	vs_service_id_t service_id;
+	unsigned long val;
+	int err;
+
+	err = kstrtoul(buf, 0, &val);
+	if (err)
+		return err;
+
+	service_id = val;
+	if (service_id == 0) {
+		/*
+		 * We don't allow removing the core service this way. The
+		 * core service will be removed when the session is removed.
+		 */
+		return -EINVAL;
+	}
+
+	target = vs_session_get_service(session, service_id);
+	if (!target)
+		return -EINVAL;
+
+	err = vs_service_delete(target, service);
+
+	vs_put_service(target);
+	return err < 0 ? err : count;
+}
+
+static DEVICE_ATTR(create_service, S_IWUSR,
+		NULL, server_core_create_service_store);
+static DEVICE_ATTR(reset_service, S_IWUSR,
+		NULL, server_core_reset_service_store);
+static DEVICE_ATTR(remove_service, S_IWUSR,
+		NULL, server_core_remove_service_store);
+
+static struct attribute *server_core_dev_attrs[] = {
+	&dev_attr_create_service.attr,
+	&dev_attr_reset_service.attr,
+	&dev_attr_remove_service.attr,
+	NULL,
+};
+
+static const struct attribute_group server_core_attr_group = {
+	.attrs = server_core_dev_attrs,
+};
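+
+/*
+ * Usage sketch (illustrative only; the exact sysfs path depends on the
+ * device names assigned on a given system):
+ *
+ *	# create a new, protocol-less service named "foo"
+ *	echo foo > /sys/bus/vservices-server/devices/<core-service>/create_service
+ *
+ *	# reset, then remove, the service with id 3
+ *	echo 3 > /sys/bus/vservices-server/devices/<core-service>/reset_service
+ *	echo 3 > /sys/bus/vservices-server/devices/<core-service>/remove_service
+ */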
+
+static int init_transport_resource_allocation(struct core_server *server)
+{
+	struct vs_session_device *session = vs_core_server_session(server);
+	struct vs_transport *transport = session->transport;
+	size_t size;
+	int err;
+
+	mutex_init(&server->alloc_lock);
+	mutex_lock(&server->alloc_lock);
+
+	transport->vt->get_quota_limits(transport, &server->out_quota_remaining,
+			&server->in_quota_remaining);
+
+	transport->vt->get_notify_bits(transport, &server->out_notify_map_bits,
+			&server->in_notify_map_bits);
+
+	size = BITS_TO_LONGS(server->in_notify_map_bits) *
+			sizeof(unsigned long);
+	server->in_notify_map = kzalloc(size, GFP_KERNEL);
+	if (server->in_notify_map_bits && !server->in_notify_map) {
+		err = -ENOMEM;
+		goto fail;
+	}
+
+	size = BITS_TO_LONGS(server->out_notify_map_bits) *
+			sizeof(unsigned long);
+	server->out_notify_map = kzalloc(size, GFP_KERNEL);
+	if (server->out_notify_map_bits && !server->out_notify_map) {
+		err = -ENOMEM;
+		goto fail_free_in_bits;
+	}
+
+	mutex_unlock(&server->alloc_lock);
+
+	return 0;
+
+fail_free_in_bits:
+	kfree(server->in_notify_map);
+fail:
+	mutex_unlock(&server->alloc_lock);
+	return err;
+}
+
+static int alloc_quota(unsigned minimum, unsigned best, unsigned set,
+		unsigned *remaining)
+{
+	unsigned quota;
+
+	if (set) {
+		quota = set;
+
+		if (quota > *remaining)
+			return -ENOSPC;
+	} else if (best) {
+		quota = min(best, *remaining);
+	} else {
+		quota = minimum;
+	}
+
+	if (quota < minimum)
+		return -ENOSPC;
+
+	*remaining -= quota;
+
+	return min_t(unsigned, quota, INT_MAX);
+}
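+
+/*
+ * Worked example of the policy above (illustrative numbers only): with
+ * 100 messages of quota remaining, a driver asking for min 4 / best 16
+ * gets 16; with only 10 remaining it gets 10; with 2 remaining the
+ * allocation fails with -ENOSPC. An explicit sysfs "set" value is used
+ * verbatim, and fails if it exceeds the remaining quota or is below the
+ * driver's minimum.
+ */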
+
+static int alloc_notify_bits(unsigned notify_count, unsigned long *map,
+		unsigned nr_bits)
+{
+	unsigned offset;
+
+	if (notify_count) {
+		offset = bitmap_find_next_zero_area(map, nr_bits, 0,
+				notify_count, 0);
+
+		if (offset >= nr_bits || offset > (unsigned)INT_MAX)
+			return -ENOSPC;
+
+		bitmap_set(map, offset, notify_count);
+	} else {
+		offset = 0;
+	}
+
+	return offset;
+}
+
+/*
+ * alloc_transport_resources - Allocates the quotas and notification bits for
+ * a service.
+ * @server: the core service state.
+ * @service: the service device to allocate resources for.
+ *
+ * This function allocates message quotas and notification bits. It is called
+ * for the core service in alloc(), and for every other service by the server
+ * bus probe() function.
+ */
+static int alloc_transport_resources(struct core_server *server,
+		struct vs_service_device *service)
+{
+	struct vs_session_device *session __maybe_unused =
+			vs_service_get_session(service);
+	unsigned in_bit_offset, out_bit_offset;
+	unsigned in_quota, out_quota;
+	int ret;
+	struct vs_service_driver *driver;
+
+	if (WARN_ON(!service->dev.driver))
+		return -ENODEV;
+
+	mutex_lock(&server->alloc_lock);
+
+	driver = to_vs_service_driver(service->dev.driver);
+
+	/* Quota allocations */
+	ret = alloc_quota(driver->in_quota_min, driver->in_quota_best,
+			service->in_quota_set, &server->in_quota_remaining);
+	if (ret < 0) {
+		dev_err(&service->dev, "cannot allocate in quota\n");
+		goto fail_in_quota;
+	}
+	in_quota = ret;
+
+	ret = alloc_quota(driver->out_quota_min, driver->out_quota_best,
+			service->out_quota_set, &server->out_quota_remaining);
+	if (ret < 0) {
+		dev_err(&service->dev, "cannot allocate out quota\n");
+		goto fail_out_quota;
+	}
+	out_quota = ret;
+
+	vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &session->dev,
+			"%d: quota in: %u out: %u; remaining in: %u out: %u\n",
+			service->id, in_quota, out_quota,
+			server->in_quota_remaining,
+			server->out_quota_remaining);
+
+	/* Notification bit allocations */
+	ret = alloc_notify_bits(service->notify_recv_bits,
+			server->in_notify_map, server->in_notify_map_bits);
+	if (ret < 0) {
+		dev_err(&service->dev, "cannot allocate in notify bits\n");
+		goto fail_in_notify;
+	}
+	in_bit_offset = ret;
+
+	ret = alloc_notify_bits(service->notify_send_bits,
+			server->out_notify_map, server->out_notify_map_bits);
+	if (ret < 0) {
+		dev_err(&service->dev, "cannot allocate out notify bits\n");
+		goto fail_out_notify;
+	}
+	out_bit_offset = ret;
+
+	vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &session->dev,
+			"notify bits in: %u/%u out: %u/%u\n",
+			in_bit_offset, service->notify_recv_bits,
+			out_bit_offset, service->notify_send_bits);
+
+	/* Fill in the device's allocations */
+	service->recv_quota = in_quota;
+	service->send_quota = out_quota;
+	service->notify_recv_offset = in_bit_offset;
+	service->notify_send_offset = out_bit_offset;
+
+	mutex_unlock(&server->alloc_lock);
+
+	return 0;
+
+fail_out_notify:
+	if (service->notify_recv_bits)
+		bitmap_clear(server->in_notify_map,
+				in_bit_offset, service->notify_recv_bits);
+fail_in_notify:
+	server->out_quota_remaining += out_quota;
+fail_out_quota:
+	server->in_quota_remaining += in_quota;
+fail_in_quota:
+
+	mutex_unlock(&server->alloc_lock);
+
+	service->recv_quota = 0;
+	service->send_quota = 0;
+	service->notify_recv_bits = 0;
+	service->notify_recv_offset = 0;
+	service->notify_send_bits = 0;
+	service->notify_send_offset = 0;
+
+	return ret;
+}
+
+/*
+ * free_transport_resources - Frees the quotas and notification bits for
+ * a non-core service.
+ * @server: the core service state.
+ * @service: the service device to free resources for.
+ *
+ * This function is called by the server to free message quotas and
+ * notification bits that were allocated by alloc_transport_resources. It must
+ * only be called when the target service is in reset, and must be called with
+ * the core service's state lock held.
+ */
+static int free_transport_resources(struct core_server *server,
+		struct vs_service_device *service)
+{
+	mutex_lock(&server->alloc_lock);
+
+	if (service->notify_recv_bits)
+		bitmap_clear(server->in_notify_map,
+				service->notify_recv_offset,
+				service->notify_recv_bits);
+
+	if (service->notify_send_bits)
+		bitmap_clear(server->out_notify_map,
+				service->notify_send_offset,
+				service->notify_send_bits);
+
+	server->in_quota_remaining += service->recv_quota;
+	server->out_quota_remaining += service->send_quota;
+
+	mutex_unlock(&server->alloc_lock);
+
+	service->recv_quota = 0;
+	service->send_quota = 0;
+	service->notify_recv_bits = 0;
+	service->notify_recv_offset = 0;
+	service->notify_send_bits = 0;
+	service->notify_send_offset = 0;
+
+	return 0;
+}
+
+static struct vs_server_core_state *
+vs_core_server_alloc(struct vs_service_device *service)
+{
+	struct core_server *server;
+	int err;
+
+	if (WARN_ON(service->id != 0))
+		goto fail;
+
+	server = kzalloc(sizeof(*server), GFP_KERNEL);
+	if (!server)
+		goto fail;
+
+	server->service = service;
+	INIT_LIST_HEAD(&server->message_queue);
+	INIT_WORK(&server->message_queue_work, message_queue_work);
+	mutex_init(&server->message_queue_lock);
+
+	err = init_transport_resource_allocation(server);
+	if (err)
+		goto fail_init_alloc;
+
+	err = alloc_transport_resources(server, service);
+	if (err)
+		goto fail_alloc_transport;
+
+	err = sysfs_create_group(&service->dev.kobj, &server_core_attr_group);
+	if (err)
+		goto fail_sysfs;
+
+	return &server->state;
+
+fail_sysfs:
+	free_transport_resources(server, service);
+fail_alloc_transport:
+	kfree(server->out_notify_map);
+	kfree(server->in_notify_map);
+fail_init_alloc:
+	kfree(server);
+fail:
+	return NULL;
+}
+
+static void vs_core_server_release(struct vs_server_core_state *state)
+{
+	struct core_server *server = to_core_server(state);
+	struct vs_session_device *session = vs_core_server_session(server);
+
+	/* Delete all the other services */
+	vs_session_delete_noncore(session);
+
+	sysfs_remove_group(&server->service->dev.kobj, &server_core_attr_group);
+	kfree(server->out_notify_map);
+	kfree(server->in_notify_map);
+	kfree(server);
+}
+
+/**
+ * vs_server_create_service - create and register a new vService server
+ * @session: the session to create the vService server on
+ * @parent: an existing server that is managing the new server
+ * @name: the name of the new service
+ * @protocol: the protocol for the new service
+ * @plat_data: value to be assigned to (struct device *)->platform_data
+ */
+struct vs_service_device *
+vs_server_create_service(struct vs_session_device *session,
+		struct vs_service_device *parent, const char *name,
+		const char *protocol, const void *plat_data)
+{
+	struct vs_service_device *core_service, *new_service;
+	struct core_server *server;
+
+	if (!session->is_server || !name || !protocol)
+		return NULL;
+
+	core_service = session->core_service;
+	if (!core_service)
+		return NULL;
+
+	device_lock(&core_service->dev);
+	if (!core_service->dev.driver) {
+		device_unlock(&core_service->dev);
+		return NULL;
+	}
+
+	server = dev_to_core_server(&core_service->dev);
+
+	if (!parent)
+		parent = core_service;
+
+	new_service = vs_server_core_create_service(server, session, parent,
+			VS_SERVICE_AUTO_ALLOCATE_ID, name, protocol, plat_data);
+
+	device_unlock(&core_service->dev);
+
+	if (IS_ERR(new_service))
+		return NULL;
+
+	return new_service;
+}
+EXPORT_SYMBOL(vs_server_create_service);
+
+/**
+ * vs_server_destroy_service - destroy and unregister a vService server. This
+ * function must _not_ be used from the target service's own workqueue.
+ * @service: The service to destroy
+ */
+int vs_server_destroy_service(struct vs_service_device *service,
+		struct vs_service_device *parent)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+
+	if (!session->is_server || service->id == 0)
+		return -EINVAL;
+
+	if (!parent)
+		parent = session->core_service;
+
+	return vs_service_delete(service, parent);
+}
+EXPORT_SYMBOL(vs_server_destroy_service);
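+
+/*
+ * Usage sketch for the two exported helpers above (illustrative only; the
+ * service and protocol names are hypothetical). Note that destruction must
+ * not be done from the target service's own workqueue:
+ *
+ *	struct vs_service_device *svc;
+ *
+ *	svc = vs_server_create_service(session, NULL, "serial0",
+ *			"com.example.serial", NULL);
+ *	if (svc) {
+ *		...
+ *		vs_server_destroy_service(svc, NULL);
+ *	}
+ */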
+
+static void __queue_service_created(struct vs_service_device *service,
+		void *data)
+{
+	struct core_server *server = (struct core_server *)data;
+
+	vs_server_core_queue_service_created(server, service);
+}
+
+static int vs_server_core_handle_connect(struct vs_server_core_state *state)
+{
+	struct core_server *server = to_core_server(state);
+	struct vs_session_device *session = vs_core_server_session(server);
+	int err;
+
+	/* Tell the other end that we've finished connecting. */
+	err = vs_server_core_core_send_ack_connect(state, GFP_KERNEL);
+	if (err)
+		return err;
+
+	/* Queue a service-created message for each existing service. */
+	vs_session_for_each_service(session, __queue_service_created, server);
+
+	/* Re-enable all the services. */
+	vs_session_enable_noncore(session);
+
+	return 0;
+}
+
+static void vs_core_server_disable_services(struct core_server *server)
+{
+	struct vs_session_device *session = vs_core_server_session(server);
+	struct pending_message *msg;
+
+	/* Disable all the other services */
+	vs_session_disable_noncore(session);
+
+	/* Flush all the pending service-readiness messages */
+	mutex_lock(&server->message_queue_lock);
+	while (!list_empty(&server->message_queue)) {
+		msg = list_first_entry(&server->message_queue,
+				struct pending_message, list);
+		vs_put_service(msg->service);
+		list_del(&msg->list);
+		kfree(msg);
+	}
+	mutex_unlock(&server->message_queue_lock);
+}
+
+static int vs_server_core_handle_disconnect(struct vs_server_core_state *state)
+{
+	struct core_server *server = to_core_server(state);
+
+	vs_core_server_disable_services(server);
+
+	return vs_server_core_core_send_ack_disconnect(state, GFP_KERNEL);
+}
+
+static int
+vs_server_core_handle_service_reset(struct vs_server_core_state *state,
+		unsigned service_id)
+{
+	struct core_server *server = to_core_server(state);
+	struct vs_session_device *session = vs_core_server_session(server);
+
+	if (service_id == 0)
+		return -EPROTO;
+
+	return vs_service_handle_reset(session, service_id, false);
+}
+
+static void vs_core_server_start(struct vs_server_core_state *state)
+{
+	struct core_server *server = to_core_server(state);
+	struct vs_session_device *session = vs_core_server_session(server);
+	int err;
+
+	vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &server->service->dev,
+			"Core server start\n");
+
+	err = vs_server_core_core_send_startup(&server->state,
+			server->service->recv_quota,
+			server->service->send_quota, GFP_KERNEL);
+
+	if (err)
+		dev_err(&session->dev, "Failed to start core protocol: %d\n",
+				err);
+}
+
+static void vs_core_server_reset(struct vs_server_core_state *state)
+{
+	struct core_server *server = to_core_server(state);
+	struct vs_session_device *session = vs_core_server_session(server);
+
+	vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &server->service->dev,
+			"Core server reset\n");
+
+	vs_core_server_disable_services(server);
+}
+
+static struct vs_server_core vs_core_server_driver = {
+	.alloc		= vs_core_server_alloc,
+	.release	= vs_core_server_release,
+	.start		= vs_core_server_start,
+	.reset		= vs_core_server_reset,
+	.tx_ready	= vs_core_server_tx_ready,
+	.core = {
+		.req_connect		= vs_server_core_handle_connect,
+		.req_disconnect		= vs_server_core_handle_disconnect,
+		.msg_service_reset	= vs_server_core_handle_service_reset,
+	},
+};
+
+/*
+ * Server bus driver
+ */
+static int vs_server_bus_match(struct device *dev, struct device_driver *driver)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+	struct vs_service_driver *vsdrv = to_vs_service_driver(driver);
+
+	/* Don't match anything to the devio driver; it's bound manually */
+	if (!vsdrv->protocol)
+		return 0;
+
+	WARN_ON_ONCE(!service->is_server || !vsdrv->is_server);
+
+	/* Don't match anything that doesn't have a protocol set yet */
+	if (!service->protocol)
+		return 0;
+
+	if (strcmp(service->protocol, vsdrv->protocol) == 0)
+		return 1;
+
+	return 0;
+}
+
+static int vs_server_bus_probe(struct device *dev)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+	struct vs_session_device *session = vs_service_get_session(service);
+	struct core_server *server = vs_server_session_core_server(session);
+	int ret;
+
+	/*
+	 * Set the notify counts for the service, unless the driver is the
+	 * devio driver in which case it has already been done by the devio
+	 * bind ioctl. The devio driver cannot be bound automatically.
+	 */
+	struct vs_service_driver *driver =
+		to_vs_service_driver(service->dev.driver);
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+	if (driver != &vs_devio_server_driver)
+#endif
+	{
+		service->notify_recv_bits = driver->in_notify_count;
+		service->notify_send_bits = driver->out_notify_count;
+	}
+
+	/*
+	 * We can't allocate transport resources here for the core service
+	 * because the resource pool doesn't exist yet. It's done in alloc()
+	 * instead (which is called, indirectly, by vs_service_bus_probe()).
+	 */
+	if (service->id == 0)
+		return vs_service_bus_probe(dev);
+
+	if (!server)
+		return -ENODEV;
+	ret = alloc_transport_resources(server, service);
+	if (ret < 0)
+		goto fail;
+
+	ret = vs_service_bus_probe(dev);
+	if (ret < 0)
+		goto fail_free_resources;
+
+	return 0;
+
+fail_free_resources:
+	free_transport_resources(server, service);
+fail:
+	return ret;
+}
+
+static int vs_server_bus_remove(struct device *dev)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+	struct vs_session_device *session = vs_service_get_session(service);
+	struct core_server *server = vs_server_session_core_server(session);
+
+	vs_service_bus_remove(dev);
+
+	/*
+	 * We skip free_transport_resources for the core service because the
+	 * resource pool has already been freed at this point. It's also
+	 * possible that the core service has disappeared, in which case
+	 * there's no work to do here.
+	 */
+	if (server != NULL && service->id != 0)
+		free_transport_resources(server, service);
+
+	return 0;
+}
+
+static ssize_t is_server_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", service->is_server);
+}
+
+static DEVICE_ATTR_RO(is_server);
+
+static ssize_t id_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", service->id);
+}
+
+static DEVICE_ATTR_RO(id);
+
+static ssize_t dev_protocol_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n", service->protocol ?: "");
+}
+
+struct service_enable_work_struct {
+	struct vs_service_device *service;
+	struct work_struct work;
+};
+
+static void service_enable_work(struct work_struct *work)
+{
+	struct service_enable_work_struct *enable_work = container_of(work,
+			struct service_enable_work_struct, work);
+	struct vs_service_device *service = enable_work->service;
+	struct vs_session_device *session = vs_service_get_session(service);
+	struct core_server *server = vs_server_session_core_server(session);
+	bool started;
+	int ret;
+
+	kfree(enable_work);
+
+	if (!server)
+		return;
+	/* Start and enable the service */
+	vs_service_state_lock(server->service);
+	started = vs_service_start(service);
+	if (!started) {
+		vs_service_state_unlock(server->service);
+		vs_put_service(service);
+		return;
+	}
+
+	if (VSERVICE_CORE_STATE_IS_CONNECTED(server->state.state.core))
+		vs_service_enable(service);
+	vs_service_state_unlock(server->service);
+
+	/* Tell the bus to search for a driver that supports the protocol */
+	ret = device_attach(&service->dev);
+	if (ret == 0)
+		dev_warn(&service->dev, "No driver found for protocol: %s\n",
+				service->protocol);
+	kobject_uevent(&service->dev.kobj, KOBJ_CHANGE);
+
+	/* The corresponding vs_get_service was done when the work was queued */
+	vs_put_service(service);
+}
+
+static ssize_t dev_protocol_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+	struct service_enable_work_struct *enable_work;
+
+	/* The protocol can only be set once */
+	if (service->protocol)
+		return -EPERM;
+
+	/* Registering additional core servers is not allowed */
+	if (strcmp(buf, VSERVICE_CORE_PROTOCOL_NAME) == 0)
+		return -EINVAL;
+
+	if (strnlen(buf, VSERVICE_CORE_PROTOCOL_NAME_SIZE) + 1 >
+			VSERVICE_CORE_PROTOCOL_NAME_SIZE)
+		return -E2BIG;
+
+	enable_work = kmalloc(sizeof(*enable_work), GFP_KERNEL);
+	if (!enable_work)
+		return -ENOMEM;
+
+	/* Set the protocol and tell the client about it */
+	service->protocol = kstrdup(buf, GFP_KERNEL);
+	if (!service->protocol) {
+		kfree(enable_work);
+		return -ENOMEM;
+	}
+	strim(service->protocol);
+
+	/*
+	 * Schedule work to enable the service. We can't do it here because
+	 * we need to take the core service lock, and doing that here makes
+	 * it depend circularly on this sysfs attribute, which can be deleted
+	 * with that lock held.
+	 *
+	 * The corresponding vs_put_service is called in the enable_work
+	 * function.
+	 */
+	INIT_WORK(&enable_work->work, service_enable_work);
+	enable_work->service = vs_get_service(service);
+	schedule_work(&enable_work->work);
+
+	return count;
+}
+
+static DEVICE_ATTR(protocol, 0644, dev_protocol_show, dev_protocol_store);
+
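+/*
+ * Usage sketch (illustrative only; the protocol string is hypothetical):
+ * a service created via create_service is bound to a driver by writing
+ * its protocol name exactly once, e.g.
+ *
+ *	echo com.example.serial > \
+ *		/sys/bus/vservices-server/devices/<service>/protocol
+ *
+ * The write schedules service_enable_work(), which starts the service and
+ * asks the bus to attach a matching driver.
+ */
+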
+static ssize_t service_name_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n", service->name);
+}
+
+static DEVICE_ATTR_RO(service_name);
+
+static ssize_t quota_in_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+	struct vs_session_device *session = vs_service_get_session(service);
+	struct core_server *server = vs_server_session_core_server(session);
+	int ret;
+	unsigned long in_quota;
+
+	if (!server)
+		return -ENODEV;
+	/*
+	 * Don't allow quota to be changed for services that have a driver
+	 * bound. We take the alloc lock here because the device lock is held
+	 * while creating and destroying this sysfs item. This means we can
+	 * race with driver binding, but that doesn't matter: we actually just
+	 * want to know that alloc_transport_resources() hasn't run yet, and
+	 * that takes the alloc lock.
+	 */
+	mutex_lock(&server->alloc_lock);
+	if (service->dev.driver) {
+		ret = -EPERM;
+		goto out;
+	}
+
+	ret = kstrtoul(buf, 0, &in_quota);
+	if (ret < 0)
+		goto out;
+
+	service->in_quota_set = in_quota;
+	ret = count;
+
+out:
+	mutex_unlock(&server->alloc_lock);
+
+	return ret;
+}
+
+static ssize_t quota_in_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n", service->recv_quota);
+}
+
+static DEVICE_ATTR_RW(quota_in);
+
+static ssize_t quota_out_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+	struct vs_session_device *session = vs_service_get_session(service);
+	struct core_server *server = vs_server_session_core_server(session);
+	int ret;
+	unsigned long out_quota;
+
+	if (!server)
+		return -ENODEV;
+	/* See comment in quota_in_store. */
+	mutex_lock(&server->alloc_lock);
+	if (service->dev.driver) {
+		ret = -EPERM;
+		goto out;
+	}
+
+	ret = kstrtoul(buf, 0, &out_quota);
+	if (ret < 0)
+		goto out;
+
+	service->out_quota_set = out_quota;
+	ret = count;
+
+out:
+	mutex_unlock(&server->alloc_lock);
+
+	return ret;
+}
+
+static ssize_t quota_out_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n", service->send_quota);
+}
+
+static DEVICE_ATTR_RW(quota_out);
+
+static struct attribute *vs_server_dev_attrs[] = {
+	&dev_attr_id.attr,
+	&dev_attr_is_server.attr,
+	&dev_attr_protocol.attr,
+	&dev_attr_service_name.attr,
+	&dev_attr_quota_in.attr,
+	&dev_attr_quota_out.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(vs_server_dev);
+
+static ssize_t protocol_show(struct device_driver *drv, char *buf)
+{
+	struct vs_service_driver *vsdrv = to_vs_service_driver(drv);
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n", vsdrv->protocol);
+}
+
+static DRIVER_ATTR_RO(protocol);
+
+static struct attribute *vs_server_drv_attrs[] = {
+	&driver_attr_protocol.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(vs_server_drv);
+
+struct bus_type vs_server_bus_type = {
+	.name		= "vservices-server",
+	.dev_groups	= vs_server_dev_groups,
+	.drv_groups	= vs_server_drv_groups,
+	.match		= vs_server_bus_match,
+	.probe		= vs_server_bus_probe,
+	.remove		= vs_server_bus_remove,
+	.uevent		= vs_service_bus_uevent,
+};
+EXPORT_SYMBOL(vs_server_bus_type);
+
+/*
+ * Server session driver
+ */
+static int vs_server_session_probe(struct device *dev)
+{
+	struct vs_session_device *session = to_vs_session_device(dev);
+	struct vs_service_device *service;
+
+	service = __vs_server_core_register_service(session, 0, NULL,
+			VSERVICE_CORE_SERVICE_NAME,
+			VSERVICE_CORE_PROTOCOL_NAME, NULL);
+
+	return PTR_ERR_OR_ZERO(service);
+}
+
+static int
+vs_server_session_service_added(struct vs_session_device *session,
+		struct vs_service_device *service)
+{
+	struct core_server *server = vs_server_session_core_server(session);
+	int err;
+
+	if (WARN_ON(!server || !service->id))
+		return -EINVAL;
+
+	err = vs_server_core_queue_service_created(server, service);
+
+	if (err)
+		vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &session->dev,
+				"failed to send service_created: %d\n", err);
+
+	return err;
+}
+
+static int
+vs_server_session_service_start(struct vs_session_device *session,
+		struct vs_service_device *service)
+{
+	struct core_server *server = vs_server_session_core_server(session);
+	int err;
+
+	if (WARN_ON(!server || !service->id))
+		return -EINVAL;
+
+	err = vs_server_core_queue_service_reset_ready(server,
+			VSERVICE_CORE_CORE_MSG_SERVER_READY, service);
+
+	if (err)
+		vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &session->dev,
+				"failed to send server_ready: %d\n", err);
+
+	return err;
+}
+
+static int
+vs_server_session_service_local_reset(struct vs_session_device *session,
+		struct vs_service_device *service)
+{
+	struct core_server *server = vs_server_session_core_server(session);
+	int err;
+
+	if (WARN_ON(!server || !service->id))
+		return -EINVAL;
+
+	err = vs_server_core_queue_service_reset_ready(server,
+			VSERVICE_CORE_CORE_MSG_SERVICE_RESET, service);
+
+	if (err)
+		vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &session->dev,
+				"failed to send service_reset: %d\n", err);
+
+	return err;
+}
+
+static int
+vs_server_session_service_removed(struct vs_session_device *session,
+		struct vs_service_device *service)
+{
+	struct core_server *server = vs_server_session_core_server(session);
+	int err;
+
+	/*
+	 * It's possible for the core server to be forcibly removed before
+	 * the other services, for example when the underlying transport
+	 * vanishes. If that happens, we can end up here with a NULL core
+	 * server pointer.
+	 */
+	if (!server)
+		return 0;
+
+	if (WARN_ON(!service->id))
+		return -EINVAL;
+
+	err = vs_server_core_queue_service_removed(server, service);
+	if (err)
+		vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &session->dev,
+				"failed to send service_removed: %d\n", err);
+
+	return err;
+}
+
+static struct vs_session_driver vs_server_session_driver = {
+	.driver	= {
+		.name			= "vservices-server-session",
+		.owner			= THIS_MODULE,
+		.bus			= &vs_session_bus_type,
+		.probe			= vs_server_session_probe,
+		.suppress_bind_attrs	= true,
+	},
+	.is_server		= true,
+	.service_bus		= &vs_server_bus_type,
+	.service_added		= vs_server_session_service_added,
+	.service_start		= vs_server_session_service_start,
+	.service_local_reset	= vs_server_session_service_local_reset,
+	.service_removed	= vs_server_session_service_removed,
+};
+
+static int __init vs_core_server_init(void)
+{
+	int ret;
+
+	ret = bus_register(&vs_server_bus_type);
+	if (ret)
+		goto fail_bus_register;
+
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+	vs_devio_server_driver.driver.bus = &vs_server_bus_type;
+	vs_devio_server_driver.driver.owner = THIS_MODULE;
+	ret = driver_register(&vs_devio_server_driver.driver);
+	if (ret)
+		goto fail_devio_register;
+#endif
+
+	ret = driver_register(&vs_server_session_driver.driver);
+	if (ret)
+		goto fail_driver_register;
+
+	ret = vservice_core_server_register(&vs_core_server_driver,
+			"vs_core_server");
+	if (ret)
+		goto fail_core_register;
+
+	vservices_server_root = kobject_create_and_add("server-sessions",
+			vservices_root);
+	if (!vservices_server_root) {
+		ret = -ENOMEM;
+		goto fail_create_root;
+	}
+
+	return 0;
+
+fail_create_root:
+	vservice_core_server_unregister(&vs_core_server_driver);
+fail_core_register:
+	driver_unregister(&vs_server_session_driver.driver);
+fail_driver_register:
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+	driver_unregister(&vs_devio_server_driver.driver);
+	vs_devio_server_driver.driver.bus = NULL;
+	vs_devio_server_driver.driver.owner = NULL;
+fail_devio_register:
+#endif
+	bus_unregister(&vs_server_bus_type);
+fail_bus_register:
+	return ret;
+}
+
+static void __exit vs_core_server_exit(void)
+{
+	kobject_put(vservices_server_root);
+	vservice_core_server_unregister(&vs_core_server_driver);
+	driver_unregister(&vs_server_session_driver.driver);
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+	driver_unregister(&vs_devio_server_driver.driver);
+	vs_devio_server_driver.driver.bus = NULL;
+	vs_devio_server_driver.driver.owner = NULL;
+#endif
+	bus_unregister(&vs_server_bus_type);
+}
+
+subsys_initcall(vs_core_server_init);
+module_exit(vs_core_server_exit);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Core Server Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/debug.h b/drivers/vservices/debug.h
new file mode 100644
index 0000000..b379b04
--- /dev/null
+++ b/drivers/vservices/debug.h
@@ -0,0 +1,74 @@
+/*
+ * drivers/vservices/debug.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Debugging macros and support functions for Virtual Services.
+ */
+#ifndef _VSERVICES_DEBUG_H
+#define _VSERVICES_DEBUG_H
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 38)
+#include <linux/printk.h>
+#else
+#ifndef no_printk
+#define no_printk(format, args...) do { } while (0)
+#endif
+#endif
+
+#include <vservices/session.h>
+#include "transport.h"
+
+#define VS_DEBUG_TRANSPORT		(1 << 0)
+#define VS_DEBUG_TRANSPORT_MESSAGES	(1 << 1)
+#define VS_DEBUG_SESSION		(1 << 2)
+#define VS_DEBUG_CLIENT			(1 << 3)
+#define VS_DEBUG_CLIENT_CORE		(1 << 4)
+#define VS_DEBUG_SERVER			(1 << 5)
+#define VS_DEBUG_SERVER_CORE		(1 << 6)
+#define VS_DEBUG_PROTOCOL		(1 << 7)
+#define VS_DEBUG_ALL			0xff
+
+#ifdef CONFIG_VSERVICES_DEBUG
+
+#define vs_debug(type, session, format, args...)			\
+	do {								\
+		if ((session)->debug_mask & (type))			\
+			dev_dbg(&(session)->dev, format, ##args);	\
+	} while (0)
+
+#define vs_dev_debug(type, session, dev, format, args...)		\
+	do {								\
+		if ((session)->debug_mask & (type))			\
+			dev_dbg(dev, format, ##args);			\
+	} while (0)
+
+static inline void vs_debug_dump_mbuf(struct vs_session_device *session,
+		struct vs_mbuf *mbuf)
+{
+	if (session->debug_mask & VS_DEBUG_TRANSPORT_MESSAGES)
+		print_hex_dump_bytes("msg:", DUMP_PREFIX_OFFSET,
+				mbuf->data, mbuf->size);
+}
+
+#else
+
+/* Dummy versions: Use no_printk to retain type/format string checking */
+#define vs_debug(type, session, format, args...) \
+	do { (void)session; no_printk(format, ##args); } while (0)
+
+#define vs_dev_debug(type, session, dev, format, args...) \
+	do { (void)session; (void)dev; no_printk(format, ##args); } while (0)
+
+static inline void vs_debug_dump_mbuf(struct vs_session_device *session,
+		struct vs_mbuf *mbuf) {}
+
+#endif /* CONFIG_VSERVICES_DEBUG */
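+
+/*
+ * Illustrative usage only (not part of the original sources): a session or
+ * protocol driver holding a struct vs_session_device pointer would typically
+ * call these helpers as, for example:
+ *
+ *	vs_debug(VS_DEBUG_SESSION, session, "activating service %d\n", id);
+ *	vs_dev_debug(VS_DEBUG_SERVER, session, &service->dev, "reset\n");
+ *
+ * where "id" and "service" are hypothetical locals. The session's debug_mask
+ * selects which VS_DEBUG_* categories are printed when CONFIG_VSERVICES_DEBUG
+ * is enabled.
+ */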
+
+#endif /* _VSERVICES_DEBUG_H */
diff --git a/drivers/vservices/devio.c b/drivers/vservices/devio.c
new file mode 100644
index 0000000..8155c8c
--- /dev/null
+++ b/drivers/vservices/devio.c
@@ -0,0 +1,1060 @@
+/*
+ * devio.c - cdev I/O for service devices
+ *
+ * Copyright (c) 2016 Cog Systems Pty Ltd
+ *     Author: Philip Derrin <philip@cog.systems>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/cdev.h>
+#include <linux/pagemap.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <linux/atomic.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/uio.h>
+#include <linux/uaccess.h>
+#include <linux/poll.h>
+#include <linux/security.h>
+#include <linux/compat.h>
+
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/transport.h>
+#include <vservices/session.h>
+#include <vservices/service.h>
+#include <vservices/ioctl.h>
+#include <linux/sched/signal.h>
+#include "session.h"
+
+#define VSERVICES_DEVICE_MAX (VS_MAX_SERVICES * VS_MAX_SESSIONS)
+
+struct vs_devio_priv {
+	struct kref kref;
+	bool running, reset;
+
+	/* Receive queue */
+	wait_queue_head_t recv_wq;
+	atomic_t notify_pending;
+	struct list_head recv_queue;
+};
+
+static void
+vs_devio_priv_free(struct kref *kref)
+{
+	struct vs_devio_priv *priv = container_of(kref, struct vs_devio_priv,
+			kref);
+
+	WARN_ON(priv->running);
+	WARN_ON(!list_empty_careful(&priv->recv_queue));
+	WARN_ON(waitqueue_active(&priv->recv_wq));
+
+	kfree(priv);
+}
+
+static void vs_devio_priv_put(struct vs_devio_priv *priv)
+{
+	kref_put(&priv->kref, vs_devio_priv_free);
+}
+
+static int
+vs_devio_service_probe(struct vs_service_device *service)
+{
+	struct vs_devio_priv *priv;
+
+	priv = kmalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	kref_init(&priv->kref);
+	priv->running = false;
+	priv->reset = false;
+	init_waitqueue_head(&priv->recv_wq);
+	atomic_set(&priv->notify_pending, 0);
+	INIT_LIST_HEAD(&priv->recv_queue);
+
+	dev_set_drvdata(&service->dev, priv);
+
+	wake_up(&service->quota_wq);
+
+	return 0;
+}
+
+static int
+vs_devio_service_remove(struct vs_service_device *service)
+{
+	struct vs_devio_priv *priv = dev_get_drvdata(&service->dev);
+
+	WARN_ON(priv->running);
+	WARN_ON(!list_empty_careful(&priv->recv_queue));
+	WARN_ON(waitqueue_active(&priv->recv_wq));
+
+	vs_devio_priv_put(priv);
+
+	return 0;
+}
+
+static int
+vs_devio_service_receive(struct vs_service_device *service,
+		struct vs_mbuf *mbuf)
+{
+	struct vs_devio_priv *priv = dev_get_drvdata(&service->dev);
+
+	WARN_ON(!priv->running);
+
+	spin_lock(&priv->recv_wq.lock);
+	list_add_tail(&mbuf->queue, &priv->recv_queue);
+	wake_up_locked(&priv->recv_wq);
+	spin_unlock(&priv->recv_wq.lock);
+
+	return 0;
+}
+
+static void
+vs_devio_service_notify(struct vs_service_device *service, u32 flags)
+{
+	struct vs_devio_priv *priv = dev_get_drvdata(&service->dev);
+	int old, cur;
+
+	WARN_ON(!priv->running);
+
+	if (!flags)
+		return;
+
+	/* open-coded atomic_or() */
+	cur = atomic_read(&priv->notify_pending);
+	while ((old = atomic_cmpxchg(&priv->notify_pending,
+					cur, cur | flags)) != cur)
+		cur = old;
+
+	wake_up(&priv->recv_wq);
+}
+
+static void
+vs_devio_service_start(struct vs_service_device *service)
+{
+	struct vs_devio_priv *priv = dev_get_drvdata(&service->dev);
+
+	if (!priv->reset) {
+		WARN_ON(priv->running);
+		priv->running = true;
+		wake_up(&service->quota_wq);
+	}
+}
+
+static void
+vs_devio_service_reset(struct vs_service_device *service)
+{
+	struct vs_devio_priv *priv = dev_get_drvdata(&service->dev);
+	struct vs_mbuf *mbuf, *tmp;
+
+	WARN_ON(!priv->running && !priv->reset);
+
+	/*
+	 * Mark the service as being in reset. This flag can never be cleared
+	 * on an open device; the user must acknowledge the reset by closing
+	 * and reopening the device.
+	 */
+	priv->reset = true;
+	priv->running = false;
+
+	spin_lock_irq(&priv->recv_wq.lock);
+	list_for_each_entry_safe(mbuf, tmp, &priv->recv_queue, queue)
+		vs_service_free_mbuf(service, mbuf);
+	INIT_LIST_HEAD(&priv->recv_queue);
+	spin_unlock_irq(&priv->recv_wq.lock);
+	wake_up_all(&priv->recv_wq);
+}
+
+/*
+ * This driver will be registered by the core server module, which must also
+ * set its bus and owner pointers.
+ */
+struct vs_service_driver vs_devio_server_driver = {
+	/* No protocol, so the normal bus match will never bind this. */
+	.protocol	= NULL,
+	.is_server	= true,
+	.rx_atomic	= true,
+
+	.probe		= vs_devio_service_probe,
+	.remove		= vs_devio_service_remove,
+	.receive	= vs_devio_service_receive,
+	.notify		= vs_devio_service_notify,
+	.start		= vs_devio_service_start,
+	.reset		= vs_devio_service_reset,
+
+	/*
+	 * Set reasonable default quotas. These can be overridden by passing
+	 * nonzero values to IOCTL_VS_BIND_SERVER, which will set the
+	 * service's *_quota_set fields.
+	 */
+	.in_quota_min	= 1,
+	.in_quota_best	= 8,
+	.out_quota_min	= 1,
+	.out_quota_best	= 8,
+
+	/* Mark the notify counts as invalid; the service's own values are used. */
+	.in_notify_count = (unsigned)-1,
+	.out_notify_count = (unsigned)-1,
+
+	.driver		= {
+		.name			= "vservices-server-devio",
+		.owner			= NULL, /* set by core server */
+		.bus			= NULL, /* set by core server */
+		.suppress_bind_attrs	= true, /* see vs_devio_poll */
+	},
+};
+EXPORT_SYMBOL_GPL(vs_devio_server_driver);
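+
+/*
+ * Illustrative note (not part of the original sources): when userspace binds
+ * this driver with IOCTL_VS_BIND_SERVER it may pass nonzero quotas, e.g.
+ *
+ *	struct vs_ioctl_bind bind = {
+ *		.send_quota = 16,
+ *		.recv_quota = 16,
+ *		.send_notify_bits = 4,
+ *		.recv_notify_bits = 4,
+ *	};
+ *
+ * vs_devio_bind_server() below copies these into the service's *_quota_set
+ * and notify_*_bits fields before probing; the values actually allocated are
+ * passed back to userspace in the same structure.
+ */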
+
+static int
+vs_devio_bind_server(struct vs_service_device *service,
+		struct vs_ioctl_bind *bind)
+{
+	int ret = -ENODEV;
+
+	/* Ensure the server module is loaded and the driver is registered. */
+	if (!try_module_get(vs_devio_server_driver.driver.owner))
+		goto fail_module_get;
+
+	device_lock(&service->dev);
+	ret = -EBUSY;
+	if (service->dev.driver != NULL)
+		goto fail_device_unbound;
+
+	/* Set up the quota and notify counts. */
+	service->in_quota_set = bind->recv_quota;
+	service->out_quota_set = bind->send_quota;
+	service->notify_send_bits = bind->send_notify_bits;
+	service->notify_recv_bits = bind->recv_notify_bits;
+
+	/* Manually probe the driver. */
+	service->dev.driver = &vs_devio_server_driver.driver;
+	ret = service->dev.bus->probe(&service->dev);
+	if (ret < 0)
+		goto fail_probe_driver;
+
+	ret = device_bind_driver(&service->dev);
+	if (ret < 0)
+		goto fail_bind_driver;
+
+	/* Pass the allocated quotas back to the user. */
+	bind->recv_quota = service->recv_quota;
+	bind->send_quota = service->send_quota;
+	bind->msg_size = vs_service_max_mbuf_size(service);
+
+	device_unlock(&service->dev);
+	module_put(vs_devio_server_driver.driver.owner);
+
+	return 0;
+
+fail_bind_driver:
+	/* Ignore the remove callback's result so the bind error is preserved */
+	service->dev.bus->remove(&service->dev);
+fail_probe_driver:
+	service->dev.driver = NULL;
+fail_device_unbound:
+	device_unlock(&service->dev);
+	module_put(vs_devio_server_driver.driver.owner);
+fail_module_get:
+	return ret;
+}
+
+/*
+ * This driver will be registered by the core client module, which must also
+ * set its bus and owner pointers.
+ */
+struct vs_service_driver vs_devio_client_driver = {
+	/* No protocol, so the normal bus match will never bind this. */
+	.protocol	= NULL,
+	.is_server	= false,
+	.rx_atomic	= true,
+
+	.probe		= vs_devio_service_probe,
+	.remove		= vs_devio_service_remove,
+	.receive	= vs_devio_service_receive,
+	.notify		= vs_devio_service_notify,
+	.start		= vs_devio_service_start,
+	.reset		= vs_devio_service_reset,
+
+	.driver		= {
+		.name			= "vservices-client-devio",
+		.owner			= NULL, /* set by core client */
+		.bus			= NULL, /* set by core client */
+		.suppress_bind_attrs	= true, /* see vs_devio_poll */
+	},
+};
+EXPORT_SYMBOL_GPL(vs_devio_client_driver);
+
+static int
+vs_devio_bind_client(struct vs_service_device *service,
+		struct vs_ioctl_bind *bind)
+{
+	int ret = -ENODEV;
+
+	/* Ensure the client module is loaded and the driver is registered. */
+	if (!try_module_get(vs_devio_client_driver.driver.owner))
+		goto fail_module_get;
+
+	device_lock(&service->dev);
+	ret = -EBUSY;
+	if (service->dev.driver != NULL)
+		goto fail_device_unbound;
+
+	/* Manually probe the driver. */
+	service->dev.driver = &vs_devio_client_driver.driver;
+	ret = service->dev.bus->probe(&service->dev);
+	if (ret < 0)
+		goto fail_probe_driver;
+
+	ret = device_bind_driver(&service->dev);
+	if (ret < 0)
+		goto fail_bind_driver;
+
+	/* Pass the allocated quotas back to the user. */
+	bind->recv_quota = service->recv_quota;
+	bind->send_quota = service->send_quota;
+	bind->msg_size = vs_service_max_mbuf_size(service);
+	bind->send_notify_bits = service->notify_send_bits;
+	bind->recv_notify_bits = service->notify_recv_bits;
+
+	device_unlock(&service->dev);
+	module_put(vs_devio_client_driver.driver.owner);
+
+	return 0;
+
+fail_bind_driver:
+	/* Ignore the remove callback's result so the bind error is preserved */
+	service->dev.bus->remove(&service->dev);
+fail_probe_driver:
+	service->dev.driver = NULL;
+fail_device_unbound:
+	device_unlock(&service->dev);
+	module_put(vs_devio_client_driver.driver.owner);
+fail_module_get:
+	return ret;
+}
+
+static struct vs_devio_priv *
+vs_devio_priv_get_from_service(struct vs_service_device *service)
+{
+	struct vs_devio_priv *priv = NULL;
+	struct device_driver *drv;
+
+	if (!service)
+		return NULL;
+
+	device_lock(&service->dev);
+	drv = service->dev.driver;
+
+	if ((drv == &vs_devio_client_driver.driver) ||
+			(drv == &vs_devio_server_driver.driver)) {
+		vs_service_state_lock(service);
+		priv = dev_get_drvdata(&service->dev);
+		if (priv)
+			kref_get(&priv->kref);
+		vs_service_state_unlock(service);
+	}
+
+	device_unlock(&service->dev);
+
+	return priv;
+}
+
+static int
+vs_devio_open(struct inode *inode, struct file *file)
+{
+	struct vs_service_device *service;
+
+	if (imajor(inode) != vservices_cdev_major)
+		return -ENODEV;
+
+	service = vs_service_lookup_by_devt(inode->i_rdev);
+	if (!service)
+		return -ENODEV;
+
+	file->private_data = service;
+
+	return 0;
+}
+
+static int
+vs_devio_release(struct inode *inode, struct file *file)
+{
+	struct vs_service_device *service = file->private_data;
+
+	if (service) {
+		struct vs_devio_priv *priv =
+			vs_devio_priv_get_from_service(service);
+
+		if (priv) {
+			device_release_driver(&service->dev);
+			vs_devio_priv_put(priv);
+		}
+
+		file->private_data = NULL;
+		vs_put_service(service);
+	}
+
+	return 0;
+}
+
+static struct iovec *
+vs_devio_check_iov(struct vs_ioctl_iovec *io, bool is_send, ssize_t *total)
+{
+	struct iovec *iov;
+	unsigned i;
+	int ret;
+
+	if (io->iovcnt > UIO_MAXIOV)
+		return ERR_PTR(-EINVAL);
+
+	iov = kmalloc(sizeof(*iov) * io->iovcnt, GFP_KERNEL);
+	if (!iov)
+		return ERR_PTR(-ENOMEM);
+
+	if (copy_from_user(iov, io->iov, sizeof(*iov) * io->iovcnt)) {
+		ret = -EFAULT;
+		goto fail;
+	}
+
+	*total = 0;
+	for (i = 0; i < io->iovcnt; i++) {
+		ssize_t iov_len = (ssize_t)iov[i].iov_len;
+
+		if (iov_len > MAX_RW_COUNT - *total) {
+			ret = -EINVAL;
+			goto fail;
+		}
+
+		if (!access_ok(is_send ? VERIFY_READ : VERIFY_WRITE,
+					iov[i].iov_base, iov_len)) {
+			ret = -EFAULT;
+			goto fail;
+		}
+
+		*total += iov_len;
+	}
+
+	return iov;
+
+fail:
+	kfree(iov);
+	return ERR_PTR(ret);
+}
+
+static ssize_t
+vs_devio_send(struct vs_service_device *service, struct iovec *iov,
+		size_t iovcnt, ssize_t to_send, bool nonblocking)
+{
+	struct vs_mbuf *mbuf = NULL;
+	struct vs_devio_priv *priv;
+	unsigned i;
+	ssize_t offset = 0;
+	ssize_t ret;
+	DEFINE_WAIT(wait);
+
+	priv = vs_devio_priv_get_from_service(service);
+	ret = -ENODEV;
+	if (!priv)
+		goto fail_priv_get;
+
+	vs_service_state_lock(service);
+
+	/*
+	 * Wait for send quota before allocating. We must open-code this wait
+	 * because there is no real state structure or base state here.
+	 */
+	ret = 0;
+	while (!vs_service_send_mbufs_available(service)) {
+		if (nonblocking) {
+			ret = -EAGAIN;
+			break;
+		}
+
+		if (signal_pending(current)) {
+			ret = -ERESTARTSYS;
+			break;
+		}
+
+		prepare_to_wait_exclusive(&service->quota_wq, &wait,
+				TASK_INTERRUPTIBLE);
+
+		vs_service_state_unlock(service);
+		schedule();
+		vs_service_state_lock(service);
+
+		if (priv->reset) {
+			ret = -ECONNRESET;
+			break;
+		}
+
+		if (!priv->running) {
+			ret = -ENOTCONN;
+			break;
+		}
+	}
+	finish_wait(&service->quota_wq, &wait);
+
+	if (ret)
+		goto fail_alloc;
+
+	mbuf = vs_service_alloc_mbuf(service, to_send, GFP_KERNEL);
+	if (IS_ERR(mbuf)) {
+		ret = PTR_ERR(mbuf);
+		goto fail_alloc;
+	}
+
+	/* Ready to send; copy data into the mbuf. */
+	ret = -EFAULT;
+	for (i = 0; i < iovcnt; i++) {
+		if (copy_from_user(mbuf->data + offset, iov[i].iov_base,
+					iov[i].iov_len))
+			goto fail_copy;
+		offset += iov[i].iov_len;
+	}
+	mbuf->size = to_send;
+
+	/* Send the message. */
+	ret = vs_service_send(service, mbuf);
+	if (ret < 0)
+		goto fail_send;
+
+	/* Wake the next waiter, if there's more quota available. */
+	if (waitqueue_active(&service->quota_wq) &&
+			vs_service_send_mbufs_available(service) > 0)
+		wake_up(&service->quota_wq);
+
+	vs_service_state_unlock(service);
+	vs_devio_priv_put(priv);
+
+	return to_send;
+
+fail_send:
+fail_copy:
+	vs_service_free_mbuf(service, mbuf);
+	wake_up(&service->quota_wq);
+fail_alloc:
+	vs_service_state_unlock(service);
+	vs_devio_priv_put(priv);
+fail_priv_get:
+	return ret;
+}
+
+static ssize_t
+vs_devio_recv(struct vs_service_device *service, struct iovec *iov,
+		size_t iovcnt, u32 *notify_bits, ssize_t recv_space,
+		bool nonblocking)
+{
+	struct vs_mbuf *mbuf = NULL;
+	struct vs_devio_priv *priv;
+	unsigned i;
+	ssize_t offset = 0;
+	ssize_t ret;
+	DEFINE_WAIT(wait);
+
+	priv = vs_devio_priv_get_from_service(service);
+	ret = -ENODEV;
+	if (!priv)
+		goto fail_priv_get;
+
+	/* Take the recv_wq lock, which also protects recv_queue. */
+	spin_lock_irq(&priv->recv_wq.lock);
+
+	/* Wait for a message, notification, or reset. */
+	ret = wait_event_interruptible_exclusive_locked_irq(priv->recv_wq,
+			!list_empty(&priv->recv_queue) || priv->reset ||
+			atomic_read(&priv->notify_pending) || nonblocking);
+
+	if (priv->reset)
+		ret = -ECONNRESET; /* Service reset */
+	else if (!ret && list_empty(&priv->recv_queue))
+		ret = -EAGAIN; /* Nonblocking, or notification */
+
+	if (ret < 0) {
+		spin_unlock_irq(&priv->recv_wq.lock);
+		goto no_mbuf;
+	}
+
+	/* Take the first mbuf from the list, and check its size. */
+	mbuf = list_first_entry(&priv->recv_queue, struct vs_mbuf, queue);
+	if (mbuf->size > recv_space) {
+		spin_unlock_irq(&priv->recv_wq.lock);
+		ret = -EMSGSIZE;
+		goto fail_msg_size;
+	}
+	list_del_init(&mbuf->queue);
+
+	spin_unlock_irq(&priv->recv_wq.lock);
+
+	/* Copy to user. */
+	ret = -EFAULT;
+	for (i = 0; (mbuf->size > offset) && (i < iovcnt); i++) {
+		size_t len = min(mbuf->size - offset, iov[i].iov_len);
+		if (copy_to_user(iov[i].iov_base, mbuf->data + offset, len))
+			goto fail_copy;
+		offset += len;
+	}
+	ret = offset;
+
+no_mbuf:
+	/*
+	 * Read and clear the pending notification bits. If any notifications
+	 * are received, don't return an error, even if we failed to receive a
+	 * message.
+	 */
+	*notify_bits = atomic_xchg(&priv->notify_pending, 0);
+	if ((ret < 0) && *notify_bits)
+		ret = 0;
+
+fail_copy:
+	if (mbuf)
+		vs_service_free_mbuf(service, mbuf);
+fail_msg_size:
+	vs_devio_priv_put(priv);
+fail_priv_get:
+	return ret;
+}
+
+static int
+vs_devio_check_perms(struct file *file, unsigned flags)
+{
+	if ((flags & MAY_READ) && !(file->f_mode & FMODE_READ))
+		return -EBADF;
+
+	if ((flags & MAY_WRITE) && !(file->f_mode & FMODE_WRITE))
+		return -EBADF;
+
+	return security_file_permission(file, flags);
+}
+
+static long
+vs_devio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	void __user *ptr = (void __user *)arg;
+	struct vs_service_device *service = file->private_data;
+	struct vs_ioctl_bind bind;
+	struct vs_ioctl_iovec io;
+	u32 flags;
+	long ret;
+	ssize_t iov_total;
+	struct iovec *iov;
+
+	if (!service)
+		return -ENODEV;
+
+	switch (cmd) {
+	case IOCTL_VS_RESET_SERVICE:
+		ret = vs_devio_check_perms(file, MAY_WRITE);
+		if (ret < 0)
+			break;
+		ret = vs_service_reset(service, service);
+		break;
+	case IOCTL_VS_GET_NAME:
+		ret = vs_devio_check_perms(file, MAY_READ);
+		if (ret < 0)
+			break;
+		if (service->name != NULL) {
+			size_t len = strnlen(service->name,
+					_IOC_SIZE(IOCTL_VS_GET_NAME) - 1);
+			if (copy_to_user(ptr, service->name, len + 1))
+				ret = -EFAULT;
+		} else {
+			ret = -EINVAL;
+		}
+		break;
+	case IOCTL_VS_GET_PROTOCOL:
+		ret = vs_devio_check_perms(file, MAY_READ);
+		if (ret < 0)
+			break;
+		if (service->protocol != NULL) {
+			size_t len = strnlen(service->protocol,
+					_IOC_SIZE(IOCTL_VS_GET_PROTOCOL) - 1);
+			if (copy_to_user(ptr, service->protocol, len + 1))
+				ret = -EFAULT;
+		} else {
+			ret = -EINVAL;
+		}
+		break;
+	case IOCTL_VS_BIND_CLIENT:
+		ret = vs_devio_check_perms(file, MAY_EXEC);
+		if (ret < 0)
+			break;
+		ret = vs_devio_bind_client(service, &bind);
+		if (!ret && copy_to_user(ptr, &bind, sizeof(bind)))
+			ret = -EFAULT;
+		break;
+	case IOCTL_VS_BIND_SERVER:
+		ret = vs_devio_check_perms(file, MAY_EXEC);
+		if (ret < 0)
+			break;
+		if (copy_from_user(&bind, ptr, sizeof(bind))) {
+			ret = -EFAULT;
+			break;
+		}
+		ret = vs_devio_bind_server(service, &bind);
+		if (!ret && copy_to_user(ptr, &bind, sizeof(bind)))
+			ret = -EFAULT;
+		break;
+	case IOCTL_VS_NOTIFY:
+		ret = vs_devio_check_perms(file, MAY_WRITE);
+		if (ret < 0)
+			break;
+		if (copy_from_user(&flags, ptr, sizeof(flags))) {
+			ret = -EFAULT;
+			break;
+		}
+		ret = vs_service_notify(service, flags);
+		break;
+	case IOCTL_VS_SEND:
+		ret = vs_devio_check_perms(file, MAY_WRITE);
+		if (ret < 0)
+			break;
+		if (copy_from_user(&io, ptr, sizeof(io))) {
+			ret = -EFAULT;
+			break;
+		}
+
+		iov = vs_devio_check_iov(&io, true, &iov_total);
+		if (IS_ERR(iov)) {
+			ret = PTR_ERR(iov);
+			break;
+		}
+
+		ret = vs_devio_send(service, iov, io.iovcnt, iov_total,
+				file->f_flags & O_NONBLOCK);
+		kfree(iov);
+		break;
+	case IOCTL_VS_RECV:
+		ret = vs_devio_check_perms(file, MAY_READ);
+		if (ret < 0)
+			break;
+		if (copy_from_user(&io, ptr, sizeof(io))) {
+			ret = -EFAULT;
+			break;
+		}
+
+		iov = vs_devio_check_iov(&io, false, &iov_total);
+		if (IS_ERR(iov)) {
+			ret = PTR_ERR(iov);
+			break;
+		}
+
+		ret = vs_devio_recv(service, iov, io.iovcnt,
+			&io.notify_bits, iov_total,
+			file->f_flags & O_NONBLOCK);
+		kfree(iov);
+
+		if (ret >= 0) {
+			u32 __user *notify_bits_ptr = ptr + offsetof(
+					struct vs_ioctl_iovec, notify_bits);
+			if (copy_to_user(notify_bits_ptr, &io.notify_bits,
+					sizeof(io.notify_bits)))
+				ret = -EFAULT;
+		}
+		break;
+	default:
+		dev_dbg(&service->dev, "Unknown ioctl %#x, arg: %lx\n", cmd,
+				arg);
+		ret = -ENOSYS;
+		break;
+	}
+
+	return ret;
+}
+
+#ifdef CONFIG_COMPAT
+
+struct vs_compat_ioctl_bind {
+	__u32 send_quota;
+	__u32 recv_quota;
+	__u32 send_notify_bits;
+	__u32 recv_notify_bits;
+	compat_size_t msg_size;
+};
+
+#define compat_ioctl_bind_conv(dest, src) ({ \
+	dest.send_quota = src.send_quota;		\
+	dest.recv_quota = src.recv_quota;		\
+	dest.send_notify_bits = src.send_notify_bits;	\
+	dest.recv_notify_bits = src.recv_notify_bits;	\
+	dest.msg_size = (compat_size_t)src.msg_size;	\
+})
+
+#define COMPAT_IOCTL_VS_BIND_CLIENT _IOR('4', 3, struct vs_compat_ioctl_bind)
+#define COMPAT_IOCTL_VS_BIND_SERVER _IOWR('4', 4, struct vs_compat_ioctl_bind)
+
+struct vs_compat_ioctl_iovec {
+	union {
+		__u32 iovcnt; /* input */
+		__u32 notify_bits; /* output (recv only) */
+	};
+	compat_uptr_t iov;
+};
+
+#define COMPAT_IOCTL_VS_SEND \
+    _IOW('4', 6, struct vs_compat_ioctl_iovec)
+#define COMPAT_IOCTL_VS_RECV \
+    _IOWR('4', 7, struct vs_compat_ioctl_iovec)
+
+static struct iovec *
+vs_devio_check_compat_iov(struct vs_compat_ioctl_iovec *c_io,
+	bool is_send, ssize_t *total)
+{
+	struct iovec *iov;
+	struct compat_iovec *c_iov;
+
+	unsigned i;
+	int ret;
+
+	if (c_io->iovcnt > UIO_MAXIOV)
+		return ERR_PTR(-EINVAL);
+
+	c_iov = kzalloc(sizeof(*c_iov) * c_io->iovcnt, GFP_KERNEL);
+	if (!c_iov)
+		return ERR_PTR(-ENOMEM);
+
+	iov = kzalloc(sizeof(*iov) * c_io->iovcnt, GFP_KERNEL);
+	if (!iov) {
+		kfree(c_iov);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	if (copy_from_user(c_iov, (struct compat_iovec __user *)
+		compat_ptr(c_io->iov), sizeof(*c_iov) * c_io->iovcnt)) {
+		ret = -EFAULT;
+		goto fail;
+	}
+
+	*total = 0;
+	for (i = 0; i < c_io->iovcnt; i++) {
+		ssize_t iov_len;
+		iov[i].iov_base = compat_ptr(c_iov[i].iov_base);
+		iov[i].iov_len = (compat_size_t)c_iov[i].iov_len;
+
+		iov_len = (ssize_t)iov[i].iov_len;
+
+		if (iov_len > MAX_RW_COUNT - *total) {
+			ret = -EINVAL;
+			goto fail;
+		}
+
+		if (!access_ok(is_send ? VERIFY_READ : VERIFY_WRITE,
+					iov[i].iov_base, iov_len)) {
+			ret = -EFAULT;
+			goto fail;
+		}
+
+		*total += iov_len;
+	}
+
+	kfree(c_iov);
+	return iov;
+
+fail:
+	kfree(c_iov);
+	kfree(iov);
+	return ERR_PTR(ret);
+}
+
+static long
+vs_devio_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	void __user *ptr = (void __user *)arg;
+	struct vs_service_device *service = file->private_data;
+	struct vs_ioctl_bind bind;
+	struct vs_compat_ioctl_bind compat_bind;
+	struct vs_compat_ioctl_iovec compat_io;
+	long ret;
+	ssize_t iov_total;
+	struct iovec *iov;
+
+	if (!service)
+		return -ENODEV;
+
+	switch (cmd) {
+	case IOCTL_VS_RESET_SERVICE:
+	case IOCTL_VS_GET_NAME:
+	case IOCTL_VS_GET_PROTOCOL:
+		return vs_devio_ioctl(file, cmd, arg);
+	case COMPAT_IOCTL_VS_SEND:
+		ret = vs_devio_check_perms(file, MAY_WRITE);
+		if (ret < 0)
+			break;
+		if (copy_from_user(&compat_io, ptr, sizeof(compat_io))) {
+			ret = -EFAULT;
+			break;
+		}
+
+		iov = vs_devio_check_compat_iov(&compat_io, true, &iov_total);
+		if (IS_ERR(iov)) {
+			ret = PTR_ERR(iov);
+			break;
+		}
+
+		ret = vs_devio_send(service, iov, compat_io.iovcnt, iov_total,
+				file->f_flags & O_NONBLOCK);
+		kfree(iov);
+
+		break;
+	case COMPAT_IOCTL_VS_RECV:
+		ret = vs_devio_check_perms(file, MAY_READ);
+		if (ret < 0)
+			break;
+		if (copy_from_user(&compat_io, ptr, sizeof(compat_io))) {
+			ret = -EFAULT;
+			break;
+		}
+
+		iov = vs_devio_check_compat_iov(&compat_io, false, &iov_total);
+		if (IS_ERR(iov)) {
+			ret = PTR_ERR(iov);
+			break;
+		}
+
+		ret = vs_devio_recv(service, iov, compat_io.iovcnt,
+			&compat_io.notify_bits, iov_total,
+			file->f_flags & O_NONBLOCK);
+		kfree(iov);
+
+		if (ret >= 0) {
+			u32 __user *notify_bits_ptr = ptr + offsetof(
+					struct vs_compat_ioctl_iovec, notify_bits);
+			if (copy_to_user(notify_bits_ptr, &compat_io.notify_bits,
+					sizeof(compat_io.notify_bits)))
+				ret = -EFAULT;
+		}
+		break;
+	case COMPAT_IOCTL_VS_BIND_CLIENT:
+		ret = vs_devio_check_perms(file, MAY_EXEC);
+		if (ret < 0)
+			break;
+		ret = vs_devio_bind_client(service, &bind);
+		compat_ioctl_bind_conv(compat_bind, bind);
+		if (!ret && copy_to_user(ptr, &compat_bind,
+					sizeof(compat_bind)))
+			ret = -EFAULT;
+		break;
+	case COMPAT_IOCTL_VS_BIND_SERVER:
+		ret = vs_devio_check_perms(file, MAY_EXEC);
+		if (ret < 0)
+			break;
+		if (copy_from_user(&compat_bind, ptr, sizeof(compat_bind))) {
+			ret = -EFAULT;
+			break;
+		}
+		compat_ioctl_bind_conv(bind, compat_bind);
+		ret = vs_devio_bind_server(service, &bind);
+		compat_ioctl_bind_conv(compat_bind, bind);
+		if (!ret && copy_to_user(ptr, &compat_bind,
+					sizeof(compat_bind)))
+			ret = -EFAULT;
+		break;
+	default:
+		dev_dbg(&service->dev, "Unknown ioctl %#x, arg: %lx\n", cmd,
+				arg);
+		ret = -ENOSYS;
+		break;
+	}
+
+	return ret;
+}
+
+#endif /* CONFIG_COMPAT */
+
+static unsigned int
+vs_devio_poll(struct file *file, struct poll_table_struct *wait)
+{
+	struct vs_service_device *service = file->private_data;
+	struct vs_devio_priv *priv = vs_devio_priv_get_from_service(service);
+	unsigned int flags = 0;
+
+	poll_wait(file, &service->quota_wq, wait);
+
+	if (priv) {
+		/*
+		 * Note: there is no way for us to ensure that all poll
+		 * waiters on a given waitqueue have gone away, other than to
+		 * actually close the file. So, this poll_wait() is only safe
+		 * if we never release our claim on the service before the
+		 * file is closed.
+		 *
+		 * We try to guarantee this by only unbinding the devio driver
+		 * on close, and setting suppress_bind_attrs in the driver so
+		 * root can't unbind us with sysfs.
+		 */
+		poll_wait(file, &priv->recv_wq, wait);
+
+		if (priv->reset) {
+			/* Service reset; raise poll error. */
+			flags |= POLLERR | POLLHUP;
+		} else if (priv->running) {
+			if (!list_empty_careful(&priv->recv_queue))
+				flags |= POLLRDNORM | POLLIN;
+			if (atomic_read(&priv->notify_pending))
+				flags |= POLLRDNORM | POLLIN;
+			if (vs_service_send_mbufs_available(service) > 0)
+				flags |= POLLWRNORM | POLLOUT;
+		}
+
+		vs_devio_priv_put(priv);
+	} else {
+		/* No driver attached. Return error flags. */
+		flags |= POLLERR | POLLHUP;
+	}
+
+	return flags;
+}
+
+static const struct file_operations vs_fops = {
+	.owner		= THIS_MODULE,
+	.open		= vs_devio_open,
+	.release	= vs_devio_release,
+	.unlocked_ioctl	= vs_devio_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= vs_devio_compat_ioctl,
+#endif
+	.poll		= vs_devio_poll,
+};
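+
+/*
+ * Rough sketch of the expected userspace flow (illustrative only; the device
+ * node name is hypothetical and depends on how nodes are created for the
+ * vs_service major):
+ *
+ *	int fd = open("/dev/vs_serviceN", O_RDWR);
+ *	struct vs_ioctl_bind bind = { 0 };
+ *	ioctl(fd, IOCTL_VS_BIND_SERVER, &bind);   // or IOCTL_VS_BIND_CLIENT
+ *	ioctl(fd, IOCTL_VS_SEND, &io);             // struct vs_ioctl_iovec
+ *	ioctl(fd, IOCTL_VS_RECV, &io);
+ *	close(fd);   // closing acknowledges any pending service reset
+ */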
+
+int vservices_cdev_major;
+static struct cdev vs_cdev;
+
+int __init
+vs_devio_init(void)
+{
+	dev_t dev;
+	int r;
+
+	r = alloc_chrdev_region(&dev, 0, VSERVICES_DEVICE_MAX,
+			"vs_service");
+	if (r < 0)
+		goto fail_alloc_chrdev;
+	vservices_cdev_major = MAJOR(dev);
+
+	cdev_init(&vs_cdev, &vs_fops);
+	r = cdev_add(&vs_cdev, dev, VSERVICES_DEVICE_MAX);
+	if (r < 0)
+		goto fail_cdev_add;
+
+	return 0;
+
+fail_cdev_add:
+	unregister_chrdev_region(dev, VSERVICES_DEVICE_MAX);
+fail_alloc_chrdev:
+	return r;
+}
+
+void __exit
+vs_devio_exit(void)
+{
+	cdev_del(&vs_cdev);
+	unregister_chrdev_region(MKDEV(vservices_cdev_major, 0),
+			VSERVICES_DEVICE_MAX);
+}
diff --git a/drivers/vservices/protocol/Kconfig b/drivers/vservices/protocol/Kconfig
new file mode 100644
index 0000000..6dd280d
--- /dev/null
+++ b/drivers/vservices/protocol/Kconfig
@@ -0,0 +1,27 @@
+#
+# vServices protocol drivers configuration
+#
+
+if VSERVICES_SERVER || VSERVICES_CLIENT
+
+menu "Protocol drivers"
+config VSERVICES_PROTOCOL_SERIAL
+	bool
+
+config VSERVICES_PROTOCOL_SERIAL_SERVER
+	tristate "Serial server protocol"
+	depends on VSERVICES_SUPPORT && VSERVICES_SERVER
+	select VSERVICES_PROTOCOL_SERIAL
+	help
+	  This option adds support for the Virtual Services serial protocol server.
+
+config VSERVICES_PROTOCOL_SERIAL_CLIENT
+	tristate "Serial client protocol"
+	depends on VSERVICES_SUPPORT && VSERVICES_CLIENT
+	select VSERVICES_PROTOCOL_SERIAL
+	help
+	  This option adds support for the Virtual Services serial protocol client.
+
+endmenu
+
+endif # VSERVICES_SERVER || VSERVICES_CLIENT
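+
+# Example (illustrative only): a defconfig fragment that builds both serial
+# protocol ends as modules could contain
+#   CONFIG_VSERVICES_PROTOCOL_SERIAL_SERVER=m
+#   CONFIG_VSERVICES_PROTOCOL_SERIAL_CLIENT=m
+# assuming VSERVICES_SUPPORT, VSERVICES_SERVER and VSERVICES_CLIENT are
+# already enabled.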
diff --git a/drivers/vservices/protocol/Makefile b/drivers/vservices/protocol/Makefile
new file mode 100644
index 0000000..10b99b5
--- /dev/null
+++ b/drivers/vservices/protocol/Makefile
@@ -0,0 +1,4 @@
+# This is an autogenerated Makefile for vservice-linux-stacks
+
+obj-$(CONFIG_VSERVICES_SUPPORT) += core/
+obj-$(CONFIG_VSERVICES_PROTOCOL_SERIAL) += serial/
diff --git a/drivers/vservices/protocol/core/Makefile b/drivers/vservices/protocol/core/Makefile
new file mode 100644
index 0000000..6bef7f5
--- /dev/null
+++ b/drivers/vservices/protocol/core/Makefile
@@ -0,0 +1,7 @@
+ccflags-y += -Werror
+
+obj-$(CONFIG_VSERVICES_SERVER) += vservices_protocol_core_server.o
+vservices_protocol_core_server-objs = server.o
+
+obj-$(CONFIG_VSERVICES_CLIENT) += vservices_protocol_core_client.o
+vservices_protocol_core_client-objs = client.o
diff --git a/drivers/vservices/protocol/core/client.c b/drivers/vservices/protocol/core/client.c
new file mode 100644
index 0000000..2dd2136
--- /dev/null
+++ b/drivers/vservices/protocol/core/client.c
@@ -0,0 +1,1069 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+ /*
+  * This is the generated code for the core client protocol handling.
+  */
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
+#include <linux/export.h>
+#endif
+
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/protocol/core/types.h>
+#include <vservices/protocol/core/common.h>
+#include <vservices/protocol/core/client.h>
+#include <vservices/service.h>
+
+#include "../../transport.h"
+
+#define VS_MBUF_SIZE(mbuf) mbuf->size
+#define VS_MBUF_DATA(mbuf) mbuf->data
+#define VS_STATE_SERVICE_PTR(state) state->service
+
+/*** Linux driver model integration ***/
+struct vs_core_client_driver {
+	struct vs_client_core *client;
+	struct list_head list;
+	struct vs_service_driver vsdrv;
+};
+
+#define to_client_driver(d) \
+        container_of(d, struct vs_core_client_driver, vsdrv)
+
+static void core_handle_start(struct vs_service_device *service)
+{
+
+	struct vs_client_core_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_core *client __maybe_unused =
+	    to_client_driver(vsdrv)->client;
+
+	vs_service_state_lock(service);
+	state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+	if (client->start)
+		client->start(state);
+	vs_service_state_unlock(service);
+}
+
+static void core_handle_reset(struct vs_service_device *service)
+{
+
+	struct vs_client_core_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_core *client __maybe_unused =
+	    to_client_driver(vsdrv)->client;
+
+	vs_service_state_lock(service);
+	state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+	if (client->reset)
+		client->reset(state);
+	vs_service_state_unlock(service);
+}
+
+static void core_handle_start_bh(struct vs_service_device *service)
+{
+
+	struct vs_client_core_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_core *client __maybe_unused =
+	    to_client_driver(vsdrv)->client;
+
+	vs_service_state_lock_bh(service);
+	state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+	if (client->start)
+		client->start(state);
+	vs_service_state_unlock_bh(service);
+}
+
+static void core_handle_reset_bh(struct vs_service_device *service)
+{
+
+	struct vs_client_core_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_core *client __maybe_unused =
+	    to_client_driver(vsdrv)->client;
+
+	vs_service_state_lock_bh(service);
+	state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+	if (client->reset)
+		client->reset(state);
+	vs_service_state_unlock_bh(service);
+}
+
+static int core_client_probe(struct vs_service_device *service);
+static int core_client_remove(struct vs_service_device *service);
+static int core_handle_message(struct vs_service_device *service,
+			       struct vs_mbuf *_mbuf);
+static void core_handle_notify(struct vs_service_device *service,
+			       uint32_t flags);
+static void core_handle_start(struct vs_service_device *service);
+static void core_handle_start_bh(struct vs_service_device *service);
+static void core_handle_reset(struct vs_service_device *service);
+static void core_handle_reset_bh(struct vs_service_device *service);
+static int core_handle_tx_ready(struct vs_service_device *service);
+
+int __vservice_core_client_register(struct vs_client_core *client,
+				    const char *name, struct module *owner)
+{
+	int ret;
+	struct vs_core_client_driver *driver;
+
+	if (client->tx_atomic && !client->rx_atomic)
+		return -EINVAL;
+
+	driver = kzalloc(sizeof(*driver), GFP_KERNEL);
+	if (!driver) {
+		ret = -ENOMEM;
+		goto fail_alloc_driver;
+	}
+
+	client->driver = &driver->vsdrv;
+	driver->client = client;
+
+	driver->vsdrv.protocol = VSERVICE_CORE_PROTOCOL_NAME;
+
+	driver->vsdrv.is_server = false;
+	driver->vsdrv.rx_atomic = client->rx_atomic;
+	driver->vsdrv.tx_atomic = client->tx_atomic;
+
+	driver->vsdrv.probe = core_client_probe;
+	driver->vsdrv.remove = core_client_remove;
+	driver->vsdrv.receive = core_handle_message;
+	driver->vsdrv.notify = core_handle_notify;
+	driver->vsdrv.start = client->tx_atomic ?
+	    core_handle_start_bh : core_handle_start;
+	driver->vsdrv.reset = client->tx_atomic ?
+	    core_handle_reset_bh : core_handle_reset;
+	driver->vsdrv.tx_ready = core_handle_tx_ready;
+	driver->vsdrv.out_notify_count = 0;
+	driver->vsdrv.in_notify_count = 0;
+	driver->vsdrv.driver.name = name;
+	driver->vsdrv.driver.owner = owner;
+	driver->vsdrv.driver.bus = &vs_client_bus_type;
+
+	ret = driver_register(&driver->vsdrv.driver);
+
+	if (ret)
+		goto fail_driver_register;
+
+	return 0;
+
+ fail_driver_register:
+	client->driver = NULL;
+	kfree(driver);
+ fail_alloc_driver:
+	return ret;
+}
+
+EXPORT_SYMBOL(__vservice_core_client_register);
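+
+/*
+ * Illustrative usage only (not part of the original sources): a core client
+ * implementation would normally register itself through the wrapper in
+ * <vservices/protocol/core/client.h>, along the lines of
+ *
+ *	static struct vs_client_core my_core_client = {
+ *		.alloc   = my_alloc,
+ *		.release = my_release,
+ *	};
+ *	...
+ *	vservice_core_client_register(&my_core_client, "my_core_client");
+ *
+ * assuming the usual wrapper that passes THIS_MODULE as the owner, mirroring
+ * the server-side vservice_core_server_register() call seen earlier in this
+ * patch.
+ */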
+
+int vservice_core_client_unregister(struct vs_client_core *client)
+{
+	struct vs_core_client_driver *driver;
+
+	if (!client->driver)
+		return 0;
+
+	driver = to_client_driver(client->driver);
+	driver_unregister(&driver->vsdrv.driver);
+
+	client->driver = NULL;
+	kfree(driver);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vservice_core_client_unregister);
+
+static int core_client_probe(struct vs_service_device *service)
+{
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_core *client = to_client_driver(vsdrv)->client;
+	struct vs_client_core_state *state;
+
+	state = client->alloc(service);
+	if (!state)
+		return -ENOMEM;
+	else if (IS_ERR(state))
+		return PTR_ERR(state);
+
+	state->service = vs_get_service(service);
+	state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+
+	dev_set_drvdata(&service->dev, state);
+
+	return 0;
+}
+
+static int core_client_remove(struct vs_service_device *service)
+{
+	struct vs_client_core_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_core *client = to_client_driver(vsdrv)->client;
+
+	state->released = true;
+	dev_set_drvdata(&service->dev, NULL);
+	client->release(state);
+
+	vs_put_service(service);
+
+	return 0;
+}
+
+static int core_handle_tx_ready(struct vs_service_device *service)
+{
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_core *client = to_client_driver(vsdrv)->client;
+	struct vs_client_core_state *state = dev_get_drvdata(&service->dev);
+
+	if (client->tx_ready)
+		client->tx_ready(state);
+
+	return 0;
+}
+
+int vs_client_core_core_getbufs_service_created(struct vs_client_core_state
+						*_state,
+						struct vs_string *service_name,
+						struct vs_string *protocol_name,
+						struct vs_mbuf *_mbuf)
+{
+	const vs_message_id_t _msg_id = VSERVICE_CORE_CORE_MSG_SERVICE_CREATED;
+	const size_t _max_size =
+	    sizeof(vs_message_id_t) + VSERVICE_CORE_SERVICE_NAME_SIZE +
+	    VSERVICE_CORE_PROTOCOL_NAME_SIZE + 4UL;
+	const size_t _min_size = _max_size - VSERVICE_CORE_PROTOCOL_NAME_SIZE;
+	size_t _exact_size;
+
+	if (*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) != _msg_id)
+		return -EINVAL;
+	if ((VS_MBUF_SIZE(_mbuf) > _max_size)
+	    || (VS_MBUF_SIZE(_mbuf) < _min_size))
+		return -EBADMSG;
+
+	service_name->ptr =
+	    (char *)(VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+	service_name->max_size = VSERVICE_CORE_SERVICE_NAME_SIZE;
+
+	protocol_name->ptr =
+	    (char *)(VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+		     VSERVICE_CORE_SERVICE_NAME_SIZE + 4UL);
+	protocol_name->max_size =
+	    VS_MBUF_SIZE(_mbuf) - (sizeof(vs_message_id_t) +
+				   VSERVICE_CORE_SERVICE_NAME_SIZE + 4UL);
+
+	/* Now check the size received is the exact size expected */
+	_exact_size =
+	    _max_size - (VSERVICE_CORE_PROTOCOL_NAME_SIZE -
+			 protocol_name->max_size);
+	if (VS_MBUF_SIZE(_mbuf) != _exact_size)
+		return -EBADMSG;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_getbufs_service_created);
+int vs_client_core_core_free_service_created(struct vs_client_core_state
+					     *_state,
+					     struct vs_string *service_name,
+					     struct vs_string *protocol_name,
+					     struct vs_mbuf *_mbuf)
+{
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_free_service_created);
+int
+vs_client_core_core_req_connect(struct vs_client_core_state *_state,
+				gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_client_core *_client =
+	    to_client_driver(vsdrv)->client;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_DISCONNECTED:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_CORE_CORE_REQ_CONNECT;
+
+	_state->state.core.statenum = VSERVICE_CORE_STATE_DISCONNECTED__CONNECT;
+
+	if (_client->core.state_change)
+		_client->core.state_change(_state,
+					   VSERVICE_CORE_STATE_DISCONNECTED,
+					   VSERVICE_CORE_STATE_DISCONNECTED__CONNECT);
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_req_connect);
+int
+vs_client_core_core_req_disconnect(struct vs_client_core_state *_state,
+				   gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_client_core *_client =
+	    to_client_driver(vsdrv)->client;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_CORE_CORE_REQ_DISCONNECT;
+
+	_state->state.core.statenum = VSERVICE_CORE_STATE_CONNECTED__DISCONNECT;
+
+	if (_client->core.state_change)
+		_client->core.state_change(_state,
+					   VSERVICE_CORE_STATE_CONNECTED,
+					   VSERVICE_CORE_STATE_CONNECTED__DISCONNECT);
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_req_disconnect);
+static int
+core_core_handle_ack_connect(const struct vs_client_core *_client,
+			     struct vs_client_core_state *_state,
+			     struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_DISCONNECTED__CONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+	_state->state.core.statenum = VSERVICE_CORE_STATE_CONNECTED;
+
+	if (_client->core.state_change)
+		_client->core.state_change(_state,
+					   VSERVICE_CORE_STATE_DISCONNECTED__CONNECT,
+					   VSERVICE_CORE_STATE_CONNECTED);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_client->core.ack_connect)
+		return _client->core.ack_connect(_state);
+	return 0;
+}
+
+static int
+core_core_handle_nack_connect(const struct vs_client_core *_client,
+			      struct vs_client_core_state *_state,
+			      struct vs_mbuf *_mbuf)
+{
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_DISCONNECTED__CONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+	_state->state.core.statenum = VSERVICE_CORE_STATE_DISCONNECTED;
+
+	if (_client->core.state_change)
+		_client->core.state_change(_state,
+					   VSERVICE_CORE_STATE_DISCONNECTED__CONNECT,
+					   VSERVICE_CORE_STATE_DISCONNECTED);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_client->core.nack_connect)
+		return _client->core.nack_connect(_state);
+	return 0;
+}
+
+EXPORT_SYMBOL(core_core_handle_ack_connect);
+static int
+core_core_handle_ack_disconnect(const struct vs_client_core *_client,
+				struct vs_client_core_state *_state,
+				struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+	_state->state.core.statenum = VSERVICE_CORE_STATE_DISCONNECTED;
+
+	if (_client->core.state_change)
+		_client->core.state_change(_state,
+					   VSERVICE_CORE_STATE_CONNECTED__DISCONNECT,
+					   VSERVICE_CORE_STATE_DISCONNECTED);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_client->core.ack_disconnect)
+		return _client->core.ack_disconnect(_state);
+	return 0;
+}
+
+static int
+core_core_handle_nack_disconnect(const struct vs_client_core *_client,
+				 struct vs_client_core_state *_state,
+				 struct vs_mbuf *_mbuf)
+{
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+	_state->state.core.statenum = VSERVICE_CORE_STATE_CONNECTED;
+
+	if (_client->core.state_change)
+		_client->core.state_change(_state,
+					   VSERVICE_CORE_STATE_CONNECTED__DISCONNECT,
+					   VSERVICE_CORE_STATE_CONNECTED);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_client->core.nack_disconnect)
+		return _client->core.nack_disconnect(_state);
+	return 0;
+}
+
+EXPORT_SYMBOL(core_core_handle_ack_disconnect);
+static int
+vs_client_core_core_handle_startup(const struct vs_client_core *_client,
+				   struct vs_client_core_state *_state,
+				   struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 8UL;
+	uint32_t core_in_quota;
+	uint32_t core_out_quota;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_OFFLINE:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	_state->state.core.statenum = VSERVICE_CORE_STATE_DISCONNECTED;
+
+	if (_client->core.state_change)
+		_client->core.state_change(_state, VSERVICE_CORE_STATE_OFFLINE,
+					   VSERVICE_CORE_STATE_DISCONNECTED);
+	core_in_quota =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	core_out_quota =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_client->core.msg_startup)
+		return _client->core.msg_startup(_state, core_in_quota,
+						 core_out_quota);
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_handle_startup);
+static int
+vs_client_core_core_handle_shutdown(const struct vs_client_core *_client,
+				    struct vs_client_core_state *_state,
+				    struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_DISCONNECTED:
+	case VSERVICE_CORE_STATE_DISCONNECTED__CONNECT:
+	case VSERVICE_CORE_STATE_CONNECTED:
+	case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_DISCONNECTED:
+		_state->state.core.statenum = VSERVICE_CORE_STATE_OFFLINE;
+
+		if (_client->core.state_change)
+			_client->core.state_change(_state,
+						   VSERVICE_CORE_STATE_DISCONNECTED,
+						   VSERVICE_CORE_STATE_OFFLINE);
+		break;
+	case VSERVICE_CORE_STATE_CONNECTED:
+		_state->state.core.statenum = VSERVICE_CORE_STATE_OFFLINE;
+
+		if (_client->core.state_change)
+			_client->core.state_change(_state,
+						   VSERVICE_CORE_STATE_CONNECTED,
+						   VSERVICE_CORE_STATE_OFFLINE);
+		break;
+
+	default:
+		break;
+	}
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_client->core.msg_shutdown)
+		return _client->core.msg_shutdown(_state);
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_handle_shutdown);
+static int
+vs_client_core_core_handle_service_created(const struct vs_client_core *_client,
+					   struct vs_client_core_state *_state,
+					   struct vs_mbuf *_mbuf)
+{
+	const size_t _max_size =
+	    sizeof(vs_message_id_t) + VSERVICE_CORE_SERVICE_NAME_SIZE +
+	    VSERVICE_CORE_PROTOCOL_NAME_SIZE + 4UL;
+	uint32_t service_id;
+	struct vs_string service_name;
+	struct vs_string protocol_name;
+	const size_t _min_size = _max_size - VSERVICE_CORE_PROTOCOL_NAME_SIZE;
+	size_t _exact_size;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED:
+	case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	/* The first check is to ensure the message isn't complete garbage */
+	if ((VS_MBUF_SIZE(_mbuf) > _max_size)
+	    || (VS_MBUF_SIZE(_mbuf) < _min_size))
+		return -EBADMSG;
+	service_id =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	service_name.ptr =
+	    (char *)(VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+	service_name.max_size = VSERVICE_CORE_SERVICE_NAME_SIZE;
+
+	protocol_name.ptr =
+	    (char *)(VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+		     VSERVICE_CORE_SERVICE_NAME_SIZE + 4UL);
+	protocol_name.max_size =
+	    VS_MBUF_SIZE(_mbuf) - (sizeof(vs_message_id_t) +
+				   VSERVICE_CORE_SERVICE_NAME_SIZE + 4UL);
+
+	/* Now check the size received is the exact size expected */
+	_exact_size =
+	    _max_size - (VSERVICE_CORE_PROTOCOL_NAME_SIZE -
+			 protocol_name.max_size);
+	if (VS_MBUF_SIZE(_mbuf) != _exact_size)
+		return -EBADMSG;
+	if (_client->core.msg_service_created)
+		return _client->core.msg_service_created(_state, service_id,
+							 service_name,
+							 protocol_name, _mbuf);
+	return 0;
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_handle_service_created);
+static int
+vs_client_core_core_handle_service_removed(const struct vs_client_core *_client,
+					   struct vs_client_core_state *_state,
+					   struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 4UL;
+	uint32_t service_id;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED:
+	case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	service_id =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_client->core.msg_service_removed)
+		return _client->core.msg_service_removed(_state, service_id);
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_handle_service_removed);
+static int
+vs_client_core_core_handle_server_ready(const struct vs_client_core *_client,
+					struct vs_client_core_state *_state,
+					struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 28UL;
+	uint32_t service_id;
+	uint32_t in_quota;
+	uint32_t out_quota;
+	uint32_t in_bit_offset;
+	uint32_t in_num_bits;
+	uint32_t out_bit_offset;
+	uint32_t out_num_bits;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED:
+	case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	service_id =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	in_quota =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+	out_quota =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 8UL);
+	in_bit_offset =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+			   12UL);
+	in_num_bits =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+			   16UL);
+	out_bit_offset =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+			   20UL);
+	out_num_bits =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+			   24UL);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_client->core.msg_server_ready)
+		return _client->core.msg_server_ready(_state, service_id,
+						      in_quota, out_quota,
+						      in_bit_offset,
+						      in_num_bits,
+						      out_bit_offset,
+						      out_num_bits);
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_handle_server_ready);
+static int
+vs_client_core_core_handle_service_reset(const struct vs_client_core *_client,
+					 struct vs_client_core_state *_state,
+					 struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 4UL;
+	uint32_t service_id;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	service_id =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_client->core.msg_service_reset)
+		return _client->core.msg_service_reset(_state, service_id);
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_handle_service_reset);
+int
+vs_client_core_core_send_service_reset(struct vs_client_core_state *_state,
+				       uint32_t service_id, gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 4UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_client_core *_client =
+	    to_client_driver(vsdrv)->client;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_CORE_CORE_MSG_SERVICE_RESET;
+
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+	    service_id;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_send_service_reset);
+static int
+core_handle_message(struct vs_service_device *service, struct vs_mbuf *_mbuf)
+{
+	vs_message_id_t message_id;
+	__maybe_unused struct vs_client_core_state *state =
+	    dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	__maybe_unused struct vs_client_core *client =
+	    to_client_driver(vsdrv)->client;
+
+	int ret;
+
+	/* Extract the message ID */
+	if (VS_MBUF_SIZE(_mbuf) < sizeof(message_id)) {
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Invalid message size %zd\n",
+			__func__, __LINE__, VS_MBUF_SIZE(_mbuf));
+
+		return -EBADMSG;
+	}
+
+	message_id = *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf));
+
+	switch (message_id) {
+
+/** interface core **/
+/* command in sync connect */
+	case VSERVICE_CORE_CORE_ACK_CONNECT:
+		ret = core_core_handle_ack_connect(client, state, _mbuf);
+		break;
+	case VSERVICE_CORE_CORE_NACK_CONNECT:
+		ret = core_core_handle_nack_connect(client, state, _mbuf);
+		break;
+
+/* command in sync disconnect */
+	case VSERVICE_CORE_CORE_ACK_DISCONNECT:
+		ret = core_core_handle_ack_disconnect(client, state, _mbuf);
+		break;
+	case VSERVICE_CORE_CORE_NACK_DISCONNECT:
+		ret = core_core_handle_nack_disconnect(client, state, _mbuf);
+		break;
+
+/* message startup */
+	case VSERVICE_CORE_CORE_MSG_STARTUP:
+		ret = vs_client_core_core_handle_startup(client, state, _mbuf);
+		break;
+
+/* message shutdown */
+	case VSERVICE_CORE_CORE_MSG_SHUTDOWN:
+		ret = vs_client_core_core_handle_shutdown(client, state, _mbuf);
+		break;
+
+/* message service_created */
+	case VSERVICE_CORE_CORE_MSG_SERVICE_CREATED:
+		ret =
+		    vs_client_core_core_handle_service_created(client, state,
+							       _mbuf);
+		break;
+
+/* message service_removed */
+	case VSERVICE_CORE_CORE_MSG_SERVICE_REMOVED:
+		ret =
+		    vs_client_core_core_handle_service_removed(client, state,
+							       _mbuf);
+		break;
+
+/* message server_ready */
+	case VSERVICE_CORE_CORE_MSG_SERVER_READY:
+		ret =
+		    vs_client_core_core_handle_server_ready(client, state,
+							    _mbuf);
+		break;
+
+/* message service_reset */
+	case VSERVICE_CORE_CORE_MSG_SERVICE_RESET:
+		ret =
+		    vs_client_core_core_handle_service_reset(client, state,
+							     _mbuf);
+		break;
+
+	default:
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Unknown message type %d\n",
+			__func__, __LINE__, (int)message_id);
+
+		ret = -EPROTO;
+		break;
+	}
+
+	if (ret) {
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Handler for message type %d returned %d\n",
+			__func__, __LINE__, (int)message_id, ret);
+
+	}
+
+	return ret;
+}
+
+static void core_handle_notify(struct vs_service_device *service,
+			       uint32_t notify_bits)
+{
+	__maybe_unused struct vs_client_core_state *state =
+	    dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	__maybe_unused struct vs_client_core *client =
+	    to_client_driver(vsdrv)->client;
+
+	uint32_t bits = notify_bits;
+	int ret;
+
+	while (bits) {
+		uint32_t not = __ffs(bits);
+		switch (not) {
+
+    /** interface core **/
+
+		default:
+			dev_err(&state->service->dev,
+				"[%s:%d] Protocol error: Unknown notification %d\n",
+				__func__, __LINE__, (int)not);
+
+			ret = -EPROTO;
+			break;
+
+		}
+		bits &= ~(1 << not);
+		if (ret) {
+			dev_err(&state->service->dev,
+				"[%s:%d] Protocol error: Handler for notification %d returned %d\n",
+				__func__, __LINE__, (int)not, ret);
+
+		}
+	}
+}
+
+MODULE_DESCRIPTION("OKL4 Virtual Services core Client Protocol Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/protocol/core/server.c b/drivers/vservices/protocol/core/server.c
new file mode 100644
index 0000000..c3f3686
--- /dev/null
+++ b/drivers/vservices/protocol/core/server.c
@@ -0,0 +1,1226 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+ /*
+  * This is the generated code for the core server protocol handling.
+  */
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
+#include <linux/export.h>
+#endif
+
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/protocol/core/types.h>
+#include <vservices/protocol/core/common.h>
+#include <vservices/protocol/core/server.h>
+#include <vservices/service.h>
+
+#include "../../transport.h"
+
+#define VS_MBUF_SIZE(mbuf) mbuf->size
+#define VS_MBUF_DATA(mbuf) mbuf->data
+#define VS_STATE_SERVICE_PTR(state) state->service
+
+/*** Linux driver model integration ***/
+struct vs_core_server_driver {
+	struct vs_server_core *server;
+	struct list_head list;
+	struct vs_service_driver vsdrv;
+};
+
+#define to_server_driver(d) \
+        container_of(d, struct vs_core_server_driver, vsdrv)
+
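+/*
+ * Driver-model start/reset hooks: each one resets the generated protocol
+ * state machine and then calls the server's optional start()/reset()
+ * callback.  The _bh variants take the bottom-half form of the service
+ * state lock and are installed when the server declares tx_atomic.
+ */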
+static void core_handle_start(struct vs_service_device *service)
+{
+
+	struct vs_server_core_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_core *server __maybe_unused =
+	    to_server_driver(vsdrv)->server;
+
+	vs_service_state_lock(service);
+	state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+	if (server->start)
+		server->start(state);
+	vs_service_state_unlock(service);
+}
+
+static void core_handle_reset(struct vs_service_device *service)
+{
+
+	struct vs_server_core_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_core *server __maybe_unused =
+	    to_server_driver(vsdrv)->server;
+
+	vs_service_state_lock(service);
+	state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+	if (server->reset)
+		server->reset(state);
+	vs_service_state_unlock(service);
+}
+
+static void core_handle_start_bh(struct vs_service_device *service)
+{
+
+	struct vs_server_core_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_core *server __maybe_unused =
+	    to_server_driver(vsdrv)->server;
+
+	vs_service_state_lock_bh(service);
+	state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+	if (server->start)
+		server->start(state);
+	vs_service_state_unlock_bh(service);
+}
+
+static void core_handle_reset_bh(struct vs_service_device *service)
+{
+
+	struct vs_server_core_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_core *server __maybe_unused =
+	    to_server_driver(vsdrv)->server;
+
+	vs_service_state_lock_bh(service);
+	state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+	if (server->reset)
+		server->reset(state);
+	vs_service_state_unlock_bh(service);
+}
+
+static int core_server_probe(struct vs_service_device *service);
+static int core_server_remove(struct vs_service_device *service);
+static int core_handle_message(struct vs_service_device *service,
+			       struct vs_mbuf *_mbuf);
+static void core_handle_notify(struct vs_service_device *service,
+			       uint32_t flags);
+static void core_handle_start(struct vs_service_device *service);
+static void core_handle_start_bh(struct vs_service_device *service);
+static void core_handle_reset(struct vs_service_device *service);
+static void core_handle_reset_bh(struct vs_service_device *service);
+static int core_handle_tx_ready(struct vs_service_device *service);
+
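+/*
+ * Registration wraps the caller's vs_server_core in a vs_core_server_driver,
+ * fills in the generic vs_service_driver callbacks and quota hints, and
+ * registers the result on the virtual services server bus.  A server that
+ * sets tx_atomic without rx_atomic is rejected with -EINVAL.
+ */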
+int __vservice_core_server_register(struct vs_server_core *server,
+				    const char *name, struct module *owner)
+{
+	int ret;
+	struct vs_core_server_driver *driver;
+
+	if (server->tx_atomic && !server->rx_atomic)
+		return -EINVAL;
+
+	driver = kzalloc(sizeof(*driver), GFP_KERNEL);
+	if (!driver) {
+		ret = -ENOMEM;
+		goto fail_alloc_driver;
+	}
+
+	server->driver = &driver->vsdrv;
+	driver->server = server;
+
+	driver->vsdrv.protocol = VSERVICE_CORE_PROTOCOL_NAME;
+
+	driver->vsdrv.is_server = true;
+	driver->vsdrv.rx_atomic = server->rx_atomic;
+	driver->vsdrv.tx_atomic = server->tx_atomic;
+	/* FIXME Jira ticket SDK-2835 - philipd. */
+	driver->vsdrv.in_quota_min = 1;
+	driver->vsdrv.in_quota_best = server->in_quota_best ?
+	    server->in_quota_best : driver->vsdrv.in_quota_min;
+	/* FIXME Jira ticket SDK-2835 - philipd. */
+	driver->vsdrv.out_quota_min = 1;
+	driver->vsdrv.out_quota_best = server->out_quota_best ?
+	    server->out_quota_best : driver->vsdrv.out_quota_min;
+	driver->vsdrv.in_notify_count = VSERVICE_CORE_NBIT_IN__COUNT;
+	driver->vsdrv.out_notify_count = VSERVICE_CORE_NBIT_OUT__COUNT;
+
+	driver->vsdrv.probe = core_server_probe;
+	driver->vsdrv.remove = core_server_remove;
+	driver->vsdrv.receive = core_handle_message;
+	driver->vsdrv.notify = core_handle_notify;
+	driver->vsdrv.start = server->tx_atomic ?
+	    core_handle_start_bh : core_handle_start;
+	driver->vsdrv.reset = server->tx_atomic ?
+	    core_handle_reset_bh : core_handle_reset;
+	driver->vsdrv.tx_ready = core_handle_tx_ready;
+	driver->vsdrv.out_notify_count = 0;
+	driver->vsdrv.in_notify_count = 0;
+	driver->vsdrv.driver.name = name;
+	driver->vsdrv.driver.owner = owner;
+	driver->vsdrv.driver.bus = &vs_server_bus_type;
+
+	ret = driver_register(&driver->vsdrv.driver);
+
+	if (ret) {
+		goto fail_driver_register;
+	}
+
+	return 0;
+
+ fail_driver_register:
+	server->driver = NULL;
+	kfree(driver);
+ fail_alloc_driver:
+	return ret;
+}
+
+EXPORT_SYMBOL(__vservice_core_server_register);
+
+int vservice_core_server_unregister(struct vs_server_core *server)
+{
+	struct vs_core_server_driver *driver;
+
+	if (!server->driver)
+		return 0;
+
+	driver = to_server_driver(server->driver);
+	driver_unregister(&driver->vsdrv.driver);
+
+	server->driver = NULL;
+	kfree(driver);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vservice_core_server_unregister);
+
+static int core_server_probe(struct vs_service_device *service)
+{
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_core *server = to_server_driver(vsdrv)->server;
+	struct vs_server_core_state *state;
+
+	state = server->alloc(service);
+	if (!state)
+		return -ENOMEM;
+	else if (IS_ERR(state))
+		return PTR_ERR(state);
+
+	state->service = vs_get_service(service);
+	state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+
+	dev_set_drvdata(&service->dev, state);
+
+	return 0;
+}
+
+static int core_server_remove(struct vs_service_device *service)
+{
+	struct vs_server_core_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_core *server = to_server_driver(vsdrv)->server;
+
+	state->released = true;
+	dev_set_drvdata(&service->dev, NULL);
+	server->release(state);
+
+	vs_put_service(service);
+
+	return 0;
+}
+
+static int core_handle_tx_ready(struct vs_service_device *service)
+{
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_core *server = to_server_driver(vsdrv)->server;
+	struct vs_server_core_state *state = dev_get_drvdata(&service->dev);
+
+	if (server->tx_ready)
+		server->tx_ready(state);
+
+	return 0;
+}
+
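+/*
+ * service_created carries the 32-bit service id followed by two fixed-size
+ * string fields.  This helper allocates an mbuf for the worst-case message
+ * and returns vs_string views into the payload so the caller can fill in
+ * the service and protocol names in place before sending.
+ */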
+struct vs_mbuf *vs_server_core_core_alloc_service_created(struct
+							  vs_server_core_state
+							  *_state,
+							  struct vs_string
+							  *service_name,
+							  struct vs_string
+							  *protocol_name,
+							  gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+	const vs_message_id_t _msg_id = VSERVICE_CORE_CORE_MSG_SERVICE_CREATED;
+	const uint32_t _msg_size =
+	    sizeof(vs_message_id_t) + VSERVICE_CORE_SERVICE_NAME_SIZE +
+	    VSERVICE_CORE_PROTOCOL_NAME_SIZE + 4UL;
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return _mbuf;
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+		return ERR_PTR(-ENOMEM);
+	}
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) = _msg_id;
+
+	if (!service_name)
+		goto fail;
+	service_name->ptr =
+	    (char *)(VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+	service_name->max_size = VSERVICE_CORE_SERVICE_NAME_SIZE;
+	if (!protocol_name)
+		goto fail;
+	protocol_name->ptr =
+	    (char *)(VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+		     VSERVICE_CORE_SERVICE_NAME_SIZE + 4UL);
+	protocol_name->max_size = VSERVICE_CORE_PROTOCOL_NAME_SIZE;
+
+	return _mbuf;
+
+ fail:
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	return NULL;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_alloc_service_created);
+int vs_server_core_core_free_service_created(struct vs_server_core_state
+					     *_state,
+					     struct vs_string *service_name,
+					     struct vs_string *protocol_name,
+					     struct vs_mbuf *_mbuf)
+{
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_free_service_created);
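+/*
+ * The generated send helpers below share one pattern: validate the current
+ * core protocol state, allocate and fill an mbuf, and send it over the
+ * transport; the handshake messages additionally advance the state machine
+ * and call the optional state_change callback.
+ */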
+int
+vs_server_core_core_send_ack_connect(struct vs_server_core_state *_state,
+				     gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_core *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_DISCONNECTED__CONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_CORE_CORE_ACK_CONNECT;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.core.statenum = VSERVICE_CORE_STATE_CONNECTED;
+
+	if (_server->core.state_change)
+		_server->core.state_change(_state,
+					   VSERVICE_CORE_STATE_DISCONNECTED__CONNECT,
+					   VSERVICE_CORE_STATE_CONNECTED);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_ack_connect);
+int
+vs_server_core_core_send_nack_connect(struct vs_server_core_state *_state,
+				      gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_core *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_DISCONNECTED__CONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_CORE_CORE_NACK_CONNECT;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.core.statenum = VSERVICE_CORE_STATE_DISCONNECTED;
+
+	if (_server->core.state_change)
+		_server->core.state_change(_state,
+					   VSERVICE_CORE_STATE_DISCONNECTED__CONNECT,
+					   VSERVICE_CORE_STATE_DISCONNECTED);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_nack_connect);
+int
+vs_server_core_core_send_ack_disconnect(struct vs_server_core_state *_state,
+					gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_core *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_CORE_CORE_ACK_DISCONNECT;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.core.statenum = VSERVICE_CORE_STATE_DISCONNECTED;
+
+	if (_server->core.state_change)
+		_server->core.state_change(_state,
+					   VSERVICE_CORE_STATE_CONNECTED__DISCONNECT,
+					   VSERVICE_CORE_STATE_DISCONNECTED);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_ack_disconnect);
+int
+vs_server_core_core_send_nack_disconnect(struct vs_server_core_state *_state,
+					 gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_core *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_CORE_CORE_NACK_DISCONNECT;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.core.statenum = VSERVICE_CORE_STATE_CONNECTED;
+
+	if (_server->core.state_change)
+		_server->core.state_change(_state,
+					   VSERVICE_CORE_STATE_CONNECTED__DISCONNECT,
+					   VSERVICE_CORE_STATE_CONNECTED);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_nack_disconnect);
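+/*
+ * req_connect/req_disconnect move the core state machine into the transient
+ * __CONNECT/__DISCONNECT states and then invoke the server callback, which
+ * is expected to reply later with one of the ack/nack helpers above.
+ */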
+static int
+vs_server_core_core_handle_req_connect(const struct vs_server_core *_server,
+				       struct vs_server_core_state *_state,
+				       struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_DISCONNECTED:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+	_state->state.core.statenum = VSERVICE_CORE_STATE_DISCONNECTED__CONNECT;
+
+	if (_server->core.state_change)
+		_server->core.state_change(_state,
+					   VSERVICE_CORE_STATE_DISCONNECTED,
+					   VSERVICE_CORE_STATE_DISCONNECTED__CONNECT);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_server->core.req_connect)
+		return _server->core.req_connect(_state);
+	else
+		dev_warn(&_state->service->dev,
+			 "[%s:%d] Protocol warning: No handler registered for _server->core.req_connect, command will never be acknowledged\n",
+			 __func__, __LINE__);
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_handle_req_connect);
+static int
+vs_server_core_core_handle_req_disconnect(const struct vs_server_core *_server,
+					  struct vs_server_core_state *_state,
+					  struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+	_state->state.core.statenum = VSERVICE_CORE_STATE_CONNECTED__DISCONNECT;
+
+	if (_server->core.state_change)
+		_server->core.state_change(_state,
+					   VSERVICE_CORE_STATE_CONNECTED,
+					   VSERVICE_CORE_STATE_CONNECTED__DISCONNECT);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_server->core.req_disconnect)
+		return _server->core.req_disconnect(_state);
+	else
+		dev_warn(&_state->service->dev,
+			 "[%s:%d] Protocol warning: No handler registered for _server->core.req_disconnect, command will never be acknowledged\n",
+			 __func__, __LINE__);
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_handle_req_disconnect);
+int
+vs_server_core_core_send_startup(struct vs_server_core_state *_state,
+				 uint32_t core_in_quota,
+				 uint32_t core_out_quota, gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 8UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_core *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_OFFLINE:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_CORE_CORE_MSG_STARTUP;
+
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+	    core_in_quota;
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL) =
+	    core_out_quota;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.core.statenum = VSERVICE_CORE_STATE_DISCONNECTED;
+
+	if (_server->core.state_change)
+		_server->core.state_change(_state, VSERVICE_CORE_STATE_OFFLINE,
+					   VSERVICE_CORE_STATE_DISCONNECTED);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_startup);
+int
+vs_server_core_core_send_shutdown(struct vs_server_core_state *_state,
+				  gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_core *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_DISCONNECTED:
+	case VSERVICE_CORE_STATE_DISCONNECTED__CONNECT:
+	case VSERVICE_CORE_STATE_CONNECTED:
+	case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_CORE_CORE_MSG_SHUTDOWN;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_DISCONNECTED:
+		_state->state.core.statenum = VSERVICE_CORE_STATE_OFFLINE;
+
+		if (_server->core.state_change)
+			_server->core.state_change(_state,
+						   VSERVICE_CORE_STATE_DISCONNECTED,
+						   VSERVICE_CORE_STATE_OFFLINE);
+		break;
+	case VSERVICE_CORE_STATE_CONNECTED:
+		_state->state.core.statenum = VSERVICE_CORE_STATE_OFFLINE;
+
+		if (_server->core.state_change)
+			_server->core.state_change(_state,
+						   VSERVICE_CORE_STATE_CONNECTED,
+						   VSERVICE_CORE_STATE_OFFLINE);
+		break;
+
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_shutdown);
+int
+vs_server_core_core_send_service_created(struct vs_server_core_state *_state,
+					 uint32_t service_id,
+					 struct vs_string service_name,
+					 struct vs_string protocol_name,
+					 struct vs_mbuf *_mbuf)
+{
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_core *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED:
+	case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+	if (*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) !=
+	    VSERVICE_CORE_CORE_MSG_SERVICE_CREATED)
+
+		return -EINVAL;
+
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+	    service_id;
+	{
+		size_t _size = strnlen(service_name.ptr, service_name.max_size);
+		if ((_size + sizeof(vs_message_id_t) + 4UL) >
+		    VS_MBUF_SIZE(_mbuf))
+			return -EINVAL;
+
+		memset(service_name.ptr + _size, 0,
+		       service_name.max_size - _size);
+	}
+	{
+		size_t _size =
+		    strnlen(protocol_name.ptr, protocol_name.max_size);
+		if ((_size + sizeof(vs_message_id_t) +
+		     VSERVICE_CORE_SERVICE_NAME_SIZE + 4UL) >
+		    VS_MBUF_SIZE(_mbuf))
+			return -EINVAL;
+
+		if (_size < protocol_name.max_size)
+			VS_MBUF_SIZE(_mbuf) -= (protocol_name.max_size - _size);
+
+	}
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_service_created);
+int
+vs_server_core_core_send_service_removed(struct vs_server_core_state *_state,
+					 uint32_t service_id, gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 4UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_core *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED:
+	case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_CORE_CORE_MSG_SERVICE_REMOVED;
+
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+	    service_id;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_service_removed);
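+/*
+ * server_ready packs seven consecutive u32 fields after the message id:
+ * service_id, in_quota, out_quota, in_bit_offset, in_num_bits,
+ * out_bit_offset and out_num_bits (28 bytes of payload).
+ */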
+int
+vs_server_core_core_send_server_ready(struct vs_server_core_state *_state,
+				      uint32_t service_id, uint32_t in_quota,
+				      uint32_t out_quota,
+				      uint32_t in_bit_offset,
+				      uint32_t in_num_bits,
+				      uint32_t out_bit_offset,
+				      uint32_t out_num_bits, gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 28UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_core *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED:
+	case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_CORE_CORE_MSG_SERVER_READY;
+
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+	    service_id;
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL) =
+	    in_quota;
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 8UL) =
+	    out_quota;
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 12UL) =
+	    in_bit_offset;
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 16UL) =
+	    in_num_bits;
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 20UL) =
+	    out_bit_offset;
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 24UL) =
+	    out_num_bits;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_server_ready);
+int
+vs_server_core_core_send_service_reset(struct vs_server_core_state *_state,
+				       uint32_t service_id, gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 4UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_core *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_CORE_CORE_MSG_SERVICE_RESET;
+
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+	    service_id;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_service_reset);
+static int
+vs_server_core_core_handle_service_reset(const struct vs_server_core *_server,
+					 struct vs_server_core_state *_state,
+					 struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 4UL;
+	uint32_t service_id;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	service_id =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_server->core.msg_service_reset)
+		return _server->core.msg_service_reset(_state, service_id);
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_handle_service_reset);
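+/*
+ * Server-side message demultiplexer: only req_connect, req_disconnect and
+ * service_reset are valid client-to-server messages for the core protocol.
+ */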
+static int
+core_handle_message(struct vs_service_device *service, struct vs_mbuf *_mbuf)
+{
+	vs_message_id_t message_id;
+	__maybe_unused struct vs_server_core_state *state =
+	    dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	__maybe_unused struct vs_server_core *server =
+	    to_server_driver(vsdrv)->server;
+
+	int ret;
+
+	/* Extract the message ID */
+	if (VS_MBUF_SIZE(_mbuf) < sizeof(message_id)) {
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Invalid message size %zd\n",
+			__func__, __LINE__, VS_MBUF_SIZE(_mbuf));
+
+		return -EBADMSG;
+	}
+
+	message_id = *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf));
+
+	switch (message_id) {
+
+/** interface core **/
+/* command in sync connect */
+	case VSERVICE_CORE_CORE_REQ_CONNECT:
+		ret =
+		    vs_server_core_core_handle_req_connect(server, state,
+							   _mbuf);
+		break;
+
+/* command in sync disconnect */
+	case VSERVICE_CORE_CORE_REQ_DISCONNECT:
+		ret =
+		    vs_server_core_core_handle_req_disconnect(server, state,
+							      _mbuf);
+		break;
+
+/* message service_reset */
+	case VSERVICE_CORE_CORE_MSG_SERVICE_RESET:
+		ret =
+		    vs_server_core_core_handle_service_reset(server, state,
+							     _mbuf);
+		break;
+
+	default:
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Unknown message type %d\n",
+			__func__, __LINE__, (int)message_id);
+
+		ret = -EPROTO;
+		break;
+	}
+
+	if (ret) {
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Handler for message type %d returned %d\n",
+			__func__, __LINE__, (int)message_id, ret);
+
+	}
+
+	return ret;
+}
+
+static void core_handle_notify(struct vs_service_device *service,
+			       uint32_t notify_bits)
+{
+	__maybe_unused struct vs_server_core_state *state =
+	    dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	__maybe_unused struct vs_server_core *server =
+	    to_server_driver(vsdrv)->server;
+
+	uint32_t bits = notify_bits;
+	int ret;
+
+	while (bits) {
+		uint32_t not = __ffs(bits);
+		switch (not) {
+
+    /** interface core **/
+
+		default:
+			dev_err(&state->service->dev,
+				"[%s:%d] Protocol error: Unknown notification %d\n",
+				__func__, __LINE__, (int)not);
+
+			ret = -EPROTO;
+			break;
+
+		}
+		bits &= ~(1 << not);
+		if (ret) {
+			dev_err(&state->service->dev,
+				"[%s:%d] Protocol error: Handler for notification %d returned %d\n",
+				__func__, __LINE__, (int)not, ret);
+
+		}
+	}
+}
+
+MODULE_DESCRIPTION("OKL4 Virtual Services core Server Protocol Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/protocol/serial/Makefile b/drivers/vservices/protocol/serial/Makefile
new file mode 100644
index 0000000..f5f29ed
--- /dev/null
+++ b/drivers/vservices/protocol/serial/Makefile
@@ -0,0 +1,7 @@
+ccflags-y += -Werror
+
+obj-$(CONFIG_VSERVICES_PROTOCOL_SERIAL_CLIENT) += vservices_protocol_serial_client.o
+vservices_protocol_serial_client-objs = client.o
+
+obj-$(CONFIG_VSERVICES_PROTOCOL_SERIAL_SERVER) += vservices_protocol_serial_server.o
+vservices_protocol_serial_server-objs = server.o
diff --git a/drivers/vservices/protocol/serial/client.c b/drivers/vservices/protocol/serial/client.c
new file mode 100644
index 0000000..1c37e72
--- /dev/null
+++ b/drivers/vservices/protocol/serial/client.c
@@ -0,0 +1,925 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+ /*
+  * This is the generated code for the serial client protocol handling.
+  */
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
+#include <linux/export.h>
+#endif
+
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/protocol/serial/types.h>
+#include <vservices/protocol/serial/common.h>
+#include <vservices/protocol/serial/client.h>
+#include <vservices/service.h>
+
+#include "../../transport.h"
+
+#define VS_MBUF_SIZE(mbuf) mbuf->size
+#define VS_MBUF_DATA(mbuf) mbuf->data
+#define VS_STATE_SERVICE_PTR(state) state->service
+
+static int _vs_client_serial_req_open(struct vs_client_serial_state *_state);
+
+/*** Linux driver model integration ***/
+struct vs_serial_client_driver {
+	struct vs_client_serial *client;
+	struct list_head list;
+	struct vs_service_driver vsdrv;
+};
+
+#define to_client_driver(d) \
+        container_of(d, struct vs_serial_client_driver, vsdrv)
+
+static void reset_nack_requests(struct vs_service_device *service)
+{
+
+}
+
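+/*
+ * On start the serial client resets its protocol state and immediately
+ * sends req_open to the server; the open/close/reopen acks are handled by
+ * the generated base handlers below.
+ */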
+static void serial_handle_start(struct vs_service_device *service)
+{
+
+	struct vs_client_serial_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_serial *client __maybe_unused =
+	    to_client_driver(vsdrv)->client;
+
+	vs_service_state_lock(service);
+	state->state = VSERVICE_SERIAL_PROTOCOL_RESET_STATE;
+
+	_vs_client_serial_req_open(state);
+
+	vs_service_state_unlock(service);
+}
+
+static void serial_handle_reset(struct vs_service_device *service)
+{
+
+	struct vs_client_serial_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_serial *client __maybe_unused =
+	    to_client_driver(vsdrv)->client;
+
+	vs_service_state_lock(service);
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base)) {
+		vs_service_state_unlock(service);
+		return;
+	}
+	state->state.base = VSERVICE_BASE_RESET_STATE;
+	reset_nack_requests(service);
+	if (client->closed)
+		client->closed(state);
+
+	state->state = VSERVICE_SERIAL_PROTOCOL_RESET_STATE;
+
+	vs_service_state_unlock(service);
+}
+
+static void serial_handle_start_bh(struct vs_service_device *service)
+{
+
+	struct vs_client_serial_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_serial *client __maybe_unused =
+	    to_client_driver(vsdrv)->client;
+
+	vs_service_state_lock_bh(service);
+	state->state = VSERVICE_SERIAL_PROTOCOL_RESET_STATE;
+
+	_vs_client_serial_req_open(state);
+
+	vs_service_state_unlock_bh(service);
+}
+
+static void serial_handle_reset_bh(struct vs_service_device *service)
+{
+
+	struct vs_client_serial_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_serial *client __maybe_unused =
+	    to_client_driver(vsdrv)->client;
+
+	vs_service_state_lock_bh(service);
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base)) {
+		vs_service_state_unlock_bh(service);
+		return;
+	}
+	state->state.base = VSERVICE_BASE_RESET_STATE;
+	reset_nack_requests(service);
+	if (client->closed)
+		client->closed(state);
+
+	state->state = VSERVICE_SERIAL_PROTOCOL_RESET_STATE;
+
+	vs_service_state_unlock_bh(service);
+}
+
+static int serial_client_probe(struct vs_service_device *service);
+static int serial_client_remove(struct vs_service_device *service);
+static int serial_handle_message(struct vs_service_device *service,
+				 struct vs_mbuf *_mbuf);
+static void serial_handle_notify(struct vs_service_device *service,
+				 uint32_t flags);
+static void serial_handle_start(struct vs_service_device *service);
+static void serial_handle_start_bh(struct vs_service_device *service);
+static void serial_handle_reset(struct vs_service_device *service);
+static void serial_handle_reset_bh(struct vs_service_device *service);
+static int serial_handle_tx_ready(struct vs_service_device *service);
+
+int __vservice_serial_client_register(struct vs_client_serial *client,
+				      const char *name, struct module *owner)
+{
+	int ret;
+	struct vs_serial_client_driver *driver;
+
+	if (client->tx_atomic && !client->rx_atomic)
+		return -EINVAL;
+
+	driver = kzalloc(sizeof(*driver), GFP_KERNEL);
+	if (!driver) {
+		ret = -ENOMEM;
+		goto fail_alloc_driver;
+	}
+
+	client->driver = &driver->vsdrv;
+	driver->client = client;
+
+	driver->vsdrv.protocol = VSERVICE_SERIAL_PROTOCOL_NAME;
+
+	driver->vsdrv.is_server = false;
+	driver->vsdrv.rx_atomic = client->rx_atomic;
+	driver->vsdrv.tx_atomic = client->tx_atomic;
+
+	driver->vsdrv.probe = serial_client_probe;
+	driver->vsdrv.remove = serial_client_remove;
+	driver->vsdrv.receive = serial_handle_message;
+	driver->vsdrv.notify = serial_handle_notify;
+	driver->vsdrv.start = client->tx_atomic ?
+	    serial_handle_start_bh : serial_handle_start;
+	driver->vsdrv.reset = client->tx_atomic ?
+	    serial_handle_reset_bh : serial_handle_reset;
+	driver->vsdrv.tx_ready = serial_handle_tx_ready;
+	driver->vsdrv.out_notify_count = 0;
+	driver->vsdrv.in_notify_count = 0;
+	driver->vsdrv.driver.name = name;
+	driver->vsdrv.driver.owner = owner;
+	driver->vsdrv.driver.bus = &vs_client_bus_type;
+
+	ret = driver_register(&driver->vsdrv.driver);
+
+	if (ret) {
+		goto fail_driver_register;
+	}
+
+	return 0;
+
+ fail_driver_register:
+	client->driver = NULL;
+	kfree(driver);
+ fail_alloc_driver:
+	return ret;
+}
+
+EXPORT_SYMBOL(__vservice_serial_client_register);
+
+int vservice_serial_client_unregister(struct vs_client_serial *client)
+{
+	struct vs_serial_client_driver *driver;
+
+	if (!client->driver)
+		return 0;
+
+	driver = to_client_driver(client->driver);
+	driver_unregister(&driver->vsdrv.driver);
+
+	client->driver = NULL;
+	kfree(driver);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vservice_serial_client_unregister);
+
+static int serial_client_probe(struct vs_service_device *service)
+{
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_serial *client = to_client_driver(vsdrv)->client;
+	struct vs_client_serial_state *state;
+
+	state = client->alloc(service);
+	if (!state)
+		return -ENOMEM;
+	else if (IS_ERR(state))
+		return PTR_ERR(state);
+
+	state->service = vs_get_service(service);
+	state->state = VSERVICE_SERIAL_PROTOCOL_RESET_STATE;
+
+	dev_set_drvdata(&service->dev, state);
+
+	return 0;
+}
+
+static int serial_client_remove(struct vs_service_device *service)
+{
+	struct vs_client_serial_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_serial *client = to_client_driver(vsdrv)->client;
+
+	state->released = true;
+	dev_set_drvdata(&service->dev, NULL);
+	client->release(state);
+
+	vs_put_service(service);
+
+	return 0;
+}
+
+static int serial_handle_tx_ready(struct vs_service_device *service)
+{
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_serial *client = to_client_driver(vsdrv)->client;
+	struct vs_client_serial_state *state = dev_get_drvdata(&service->dev);
+
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base))
+		return 0;
+
+	if (client->tx_ready)
+		client->tx_ready(state);
+
+	return 0;
+}
+
+static int _vs_client_serial_req_open(struct vs_client_serial_state *_state)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_client_serial *_client =
+	    to_client_driver(vsdrv)->client;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_CLOSED:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  vs_service_has_atomic_rx(VS_STATE_SERVICE_PTR
+							   (_state)) ?
+				  GFP_ATOMIC : GFP_KERNEL);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_SERIAL_BASE_REQ_OPEN;
+
+	_state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED__OPEN;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(_vs_client_serial_req_open);
+static int _vs_client_serial_req_close(struct vs_client_serial_state *_state)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_client_serial *_client =
+	    to_client_driver(vsdrv)->client;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  vs_service_has_atomic_rx(VS_STATE_SERVICE_PTR
+							   (_state)) ?
+				  GFP_ATOMIC : GFP_KERNEL);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_SERIAL_BASE_REQ_CLOSE;
+
+	_state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING__CLOSE;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(_vs_client_serial_req_close);
+static int _vs_client_serial_req_reopen(struct vs_client_serial_state *_state)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_client_serial *_client =
+	    to_client_driver(vsdrv)->client;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  vs_service_has_atomic_rx(VS_STATE_SERVICE_PTR
+							   (_state)) ?
+				  GFP_ATOMIC : GFP_KERNEL);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_SERIAL_BASE_REQ_REOPEN;
+
+	_state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING__REOPEN;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(_vs_client_serial_req_reopen);
+static int
+serial_base_handle_ack_open(const struct vs_client_serial *_client,
+			    struct vs_client_serial_state *_state,
+			    struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 4UL;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_CLOSED__OPEN:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+	_state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING;
+	_state->serial.packet_size =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	_state->packet_size =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	_client->opened(_state);
+	return 0;
+
+}
+
+static int
+serial_base_handle_nack_open(const struct vs_client_serial *_client,
+			     struct vs_client_serial_state *_state,
+			     struct vs_mbuf *_mbuf)
+{
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_CLOSED__OPEN:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+	_state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED;
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	dev_err(&VS_STATE_SERVICE_PTR(_state)->dev,
+		"Open operation failed for device %s\n",
+		VS_STATE_SERVICE_PTR(_state)->name);
+
+	return 0;
+
+}
+
+EXPORT_SYMBOL(serial_base_handle_ack_open);
+static int
+serial_base_handle_ack_close(const struct vs_client_serial *_client,
+			     struct vs_client_serial_state *_state,
+			     struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING__CLOSE:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+	_state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED;
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	wake_up_all(&_state->service->quota_wq);
+	_client->closed(_state);
+	return 0;
+
+}
+
+static int
+serial_base_handle_nack_close(const struct vs_client_serial *_client,
+			      struct vs_client_serial_state *_state,
+			      struct vs_mbuf *_mbuf)
+{
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING__CLOSE:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+	_state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING;
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	wake_up_all(&_state->service->quota_wq);
+	_client->closed(_state);
+	return 0;
+
+}
+
+EXPORT_SYMBOL(serial_base_handle_ack_close);
+static int
+serial_base_handle_ack_reopen(const struct vs_client_serial *_client,
+			      struct vs_client_serial_state *_state,
+			      struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING__REOPEN:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+	_state->state.base.statenum = VSERVICE_BASE__RESET;
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_client->reopened) {
+		_client->reopened(_state);
+		return 0;
+	}
+	wake_up_all(&_state->service->quota_wq);
+	_client->closed(_state);
+	return _vs_client_serial_req_open(_state);
+
+}
+
+static int
+serial_base_handle_nack_reopen(const struct vs_client_serial *_client,
+			       struct vs_client_serial_state *_state,
+			       struct vs_mbuf *_mbuf)
+{
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING__REOPEN:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	return 0;
+
+}
+
+EXPORT_SYMBOL(serial_base_handle_ack_reopen);
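+/*
+ * serial msg payload layout: a u32 length field followed by up to
+ * packet_size bytes of data, where packet_size was negotiated in ack_open.
+ * alloc_msg reserves the maximum; send_msg trims the mbuf to the actual
+ * length before transmission.
+ */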
+struct vs_mbuf *vs_client_serial_serial_alloc_msg(struct vs_client_serial_state
+						  *_state, struct vs_pbuf *b,
+						  gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+	const vs_message_id_t _msg_id = VSERVICE_SERIAL_SERIAL_MSG_MSG;
+	const uint32_t _msg_size =
+	    sizeof(vs_message_id_t) + _state->serial.packet_size + 4UL;
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return _mbuf;
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+		return ERR_PTR(-ENOMEM);
+	}
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) = _msg_id;
+
+	if (!b)
+		goto fail;
+	b->data =
+	    (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL +
+			   sizeof(uint32_t));
+	b->size = _state->serial.packet_size;
+	b->max_size = b->size;
+	return _mbuf;
+
+ fail:
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	return NULL;
+}
+
+EXPORT_SYMBOL(vs_client_serial_serial_alloc_msg);
+int vs_client_serial_serial_getbufs_msg(struct vs_client_serial_state *_state,
+					struct vs_pbuf *b,
+					struct vs_mbuf *_mbuf)
+{
+	const vs_message_id_t _msg_id = VSERVICE_SERIAL_SERIAL_MSG_MSG;
+	const size_t _max_size =
+	    sizeof(vs_message_id_t) + _state->serial.packet_size + 4UL;
+	const size_t _min_size = _max_size - _state->serial.packet_size;
+	size_t _exact_size;
+
+	if (*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) != _msg_id)
+		return -EINVAL;
+	if ((VS_MBUF_SIZE(_mbuf) > _max_size)
+	    || (VS_MBUF_SIZE(_mbuf) < _min_size))
+		return -EBADMSG;
+
+	b->size =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	b->data =
+	    (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL +
+			   sizeof(uint32_t));
+	b->max_size = b->size;
+
+	/* Now check the size received is the exact size expected */
+	_exact_size = _max_size - (_state->serial.packet_size - b->size);
+	if (VS_MBUF_SIZE(_mbuf) != _exact_size)
+		return -EBADMSG;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_serial_serial_getbufs_msg);
+int vs_client_serial_serial_free_msg(struct vs_client_serial_state *_state,
+				     struct vs_pbuf *b, struct vs_mbuf *_mbuf)
+{
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_serial_serial_free_msg);
+static int
+vs_client_serial_serial_handle_msg(const struct vs_client_serial *_client,
+				   struct vs_client_serial_state *_state,
+				   struct vs_mbuf *_mbuf)
+{
+	const size_t _max_size =
+	    sizeof(vs_message_id_t) + _state->serial.packet_size + 4UL;
+	struct vs_pbuf b;
+	const size_t _min_size = _max_size - _state->serial.packet_size;
+	size_t _exact_size;
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(_state->state.base))
+		return -EPROTO;
+
+	/* The first check is to ensure the message isn't complete garbage */
+	if ((VS_MBUF_SIZE(_mbuf) > _max_size)
+	    || (VS_MBUF_SIZE(_mbuf) < _min_size))
+		return -EBADMSG;
+
+	b.size =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	b.data =
+	    (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL +
+			   sizeof(uint32_t));
+	b.max_size = b.size;
+
+	/* Now check the size received is the exact size expected */
+	_exact_size = _max_size - (_state->serial.packet_size - b.size);
+	if (VS_MBUF_SIZE(_mbuf) != _exact_size)
+		return -EBADMSG;
+	if (_client->serial.msg_msg)
+		return _client->serial.msg_msg(_state, b, _mbuf);
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_serial_serial_handle_msg);
+int
+vs_client_serial_serial_send_msg(struct vs_client_serial_state *_state,
+				 struct vs_pbuf b, struct vs_mbuf *_mbuf)
+{
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_client_serial *_client =
+	    to_client_driver(vsdrv)->client;
+	if (_state->state.base.statenum != VSERVICE_BASE_STATE_RUNNING)
+		return -EPROTO;
+	if (*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) !=
+	    VSERVICE_SERIAL_SERIAL_MSG_MSG)
+
+		return -EINVAL;
+
+	if ((b.size + sizeof(vs_message_id_t) + 0UL) > VS_MBUF_SIZE(_mbuf))
+		return -EINVAL;
+
+	if (b.size < b.max_size)
+		VS_MBUF_SIZE(_mbuf) -= (b.max_size - b.size);
+
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+	    b.size;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_serial_serial_send_msg);
+static int
+serial_handle_message(struct vs_service_device *service, struct vs_mbuf *_mbuf)
+{
+	vs_message_id_t message_id;
+	__maybe_unused struct vs_client_serial_state *state =
+	    dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	__maybe_unused struct vs_client_serial *client =
+	    to_client_driver(vsdrv)->client;
+
+	int ret;
+
+	/* Extract the message ID */
+	if (VS_MBUF_SIZE(_mbuf) < sizeof(message_id)) {
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Invalid message size %zd\n",
+			__func__, __LINE__, VS_MBUF_SIZE(_mbuf));
+
+		return -EBADMSG;
+	}
+
+	message_id = *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf));
+
+	switch (message_id) {
+
+/** interface base **/
+/* command in sync open */
+	case VSERVICE_SERIAL_BASE_ACK_OPEN:
+		ret = serial_base_handle_ack_open(client, state, _mbuf);
+		break;
+	case VSERVICE_SERIAL_BASE_NACK_OPEN:
+		ret = serial_base_handle_nack_open(client, state, _mbuf);
+		break;
+
+/* command in sync close */
+	case VSERVICE_SERIAL_BASE_ACK_CLOSE:
+		ret = serial_base_handle_ack_close(client, state, _mbuf);
+		break;
+	case VSERVICE_SERIAL_BASE_NACK_CLOSE:
+		ret = serial_base_handle_nack_close(client, state, _mbuf);
+		break;
+
+/* command in sync reopen */
+	case VSERVICE_SERIAL_BASE_ACK_REOPEN:
+		ret = serial_base_handle_ack_reopen(client, state, _mbuf);
+		break;
+	case VSERVICE_SERIAL_BASE_NACK_REOPEN:
+		ret = serial_base_handle_nack_reopen(client, state, _mbuf);
+		break;
+
+/** interface serial **/
+/* message msg */
+	case VSERVICE_SERIAL_SERIAL_MSG_MSG:
+		ret = vs_client_serial_serial_handle_msg(client, state, _mbuf);
+		break;
+
+	default:
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Unknown message type %d\n",
+			__func__, __LINE__, (int)message_id);
+
+		ret = -EPROTO;
+		break;
+	}
+
+	if (ret) {
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Handler for message type %d returned %d\n",
+			__func__, __LINE__, (int)message_id, ret);
+
+	}
+
+	return ret;
+}
+
+static void serial_handle_notify(struct vs_service_device *service,
+				 uint32_t notify_bits)
+{
+	__maybe_unused struct vs_client_serial_state *state =
+	    dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	__maybe_unused struct vs_client_serial *client =
+	    to_client_driver(vsdrv)->client;
+
+	uint32_t bits = notify_bits;
+	int ret;
+
+	while (bits) {
+		uint32_t not = __ffs(bits);
+		switch (not) {
+
+    /** interface serial **/
+
+		default:
+			dev_err(&state->service->dev,
+				"[%s:%d] Protocol error: Unknown notification %d\n",
+				__func__, __LINE__, (int)not);
+
+			ret = -EPROTO;
+			break;
+
+		}
+		bits &= ~(1 << not);
+		if (ret) {
+			dev_err(&state->service->dev,
+				"[%s:%d] Protocol error: Handler for notification %d returned %d\n",
+				__func__, __LINE__, (int)not, ret);
+
+		}
+	}
+}
+
+int vs_client_serial_reopen(struct vs_client_serial_state *_state)
+{
+	return _vs_client_serial_req_reopen(_state);
+}
+
+EXPORT_SYMBOL(vs_client_serial_reopen);
+
+int vs_client_serial_close(struct vs_client_serial_state *_state)
+{
+	return _vs_client_serial_req_close(_state);
+}
+
+EXPORT_SYMBOL(vs_client_serial_close);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services serial Client Protocol Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/protocol/serial/server.c b/drivers/vservices/protocol/serial/server.c
new file mode 100644
index 0000000..e5d1034
--- /dev/null
+++ b/drivers/vservices/protocol/serial/server.c
@@ -0,0 +1,1086 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+ /*
+  * This is the generated code for the serial server protocol handling.
+  */
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
+#include <linux/export.h>
+#endif
+
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/protocol/serial/types.h>
+#include <vservices/protocol/serial/common.h>
+#include <vservices/protocol/serial/server.h>
+#include <vservices/service.h>
+
+#include "../../transport.h"
+
+#define VS_MBUF_SIZE(mbuf) mbuf->size
+#define VS_MBUF_DATA(mbuf) mbuf->data
+#define VS_STATE_SERVICE_PTR(state) state->service
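+
+/*
+ * Note on the on-the-wire layout assumed by the generated serial "msg"
+ * helpers below: each msg mbuf carries a vs_message_id_t header, followed
+ * by a uint32_t payload length, followed by up to serial.packet_size bytes
+ * of payload. This is where the 4UL (sizeof(uint32_t)) term in the size
+ * calculations and the offsets used when packing and unpacking struct
+ * vs_pbuf come from.
+ */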
+
+/*** Linux driver model integration ***/
+struct vs_serial_server_driver {
+	struct vs_server_serial *server;
+	struct list_head list;
+	struct vs_service_driver vsdrv;
+};
+
+#define to_server_driver(d) \
+        container_of(d, struct vs_serial_server_driver, vsdrv)
+
+static void reset_nack_requests(struct vs_service_device *service)
+{
+
+}
+
+static void serial_handle_start(struct vs_service_device *service)
+{
+
+	struct vs_server_serial_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_serial *server __maybe_unused =
+	    to_server_driver(vsdrv)->server;
+
+	vs_service_state_lock(service);
+	state->state = VSERVICE_SERIAL_PROTOCOL_RESET_STATE;
+
+	vs_service_state_unlock(service);
+}
+
+static void serial_handle_reset(struct vs_service_device *service)
+{
+
+	struct vs_server_serial_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_serial *server __maybe_unused =
+	    to_server_driver(vsdrv)->server;
+
+	vs_service_state_lock(service);
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base)) {
+		vs_service_state_unlock(service);
+		return;
+	}
+	state->state.base = VSERVICE_BASE_RESET_STATE;
+	reset_nack_requests(service);
+	if (server->closed)
+		server->closed(state);
+
+	state->state = VSERVICE_SERIAL_PROTOCOL_RESET_STATE;
+
+	vs_service_state_unlock(service);
+}
+
+static void serial_handle_start_bh(struct vs_service_device *service)
+{
+
+	struct vs_server_serial_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_serial *server __maybe_unused =
+	    to_server_driver(vsdrv)->server;
+
+	vs_service_state_lock_bh(service);
+	state->state = VSERVICE_SERIAL_PROTOCOL_RESET_STATE;
+
+	vs_service_state_unlock_bh(service);
+}
+
+static void serial_handle_reset_bh(struct vs_service_device *service)
+{
+
+	struct vs_server_serial_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_serial *server __maybe_unused =
+	    to_server_driver(vsdrv)->server;
+
+	vs_service_state_lock_bh(service);
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base)) {
+		vs_service_state_unlock_bh(service);
+		return;
+	}
+	state->state.base = VSERVICE_BASE_RESET_STATE;
+	reset_nack_requests(service);
+	if (server->closed)
+		server->closed(state);
+
+	state->state = VSERVICE_SERIAL_PROTOCOL_RESET_STATE;
+
+	vs_service_state_unlock_bh(service);
+}
+
+static int serial_server_probe(struct vs_service_device *service);
+static int serial_server_remove(struct vs_service_device *service);
+static int serial_handle_message(struct vs_service_device *service,
+				 struct vs_mbuf *_mbuf);
+static void serial_handle_notify(struct vs_service_device *service,
+				 uint32_t flags);
+static void serial_handle_start(struct vs_service_device *service);
+static void serial_handle_start_bh(struct vs_service_device *service);
+static void serial_handle_reset(struct vs_service_device *service);
+static void serial_handle_reset_bh(struct vs_service_device *service);
+static int serial_handle_tx_ready(struct vs_service_device *service);
+
+int __vservice_serial_server_register(struct vs_server_serial *server,
+				      const char *name, struct module *owner)
+{
+	int ret;
+	struct vs_serial_server_driver *driver;
+
+	if (server->tx_atomic && !server->rx_atomic)
+		return -EINVAL;
+
+	driver = kzalloc(sizeof(*driver), GFP_KERNEL);
+	if (!driver) {
+		ret = -ENOMEM;
+		goto fail_alloc_driver;
+	}
+
+	server->driver = &driver->vsdrv;
+	driver->server = server;
+
+	driver->vsdrv.protocol = VSERVICE_SERIAL_PROTOCOL_NAME;
+
+	driver->vsdrv.is_server = true;
+	driver->vsdrv.rx_atomic = server->rx_atomic;
+	driver->vsdrv.tx_atomic = server->tx_atomic;
+	/* FIXME Jira ticket SDK-2835 - philipd. */
+	driver->vsdrv.in_quota_min = 1;
+	driver->vsdrv.in_quota_best = server->in_quota_best ?
+	    server->in_quota_best : driver->vsdrv.in_quota_min;
+	/* FIXME Jira ticket SDK-2835 - philipd. */
+	driver->vsdrv.out_quota_min = 1;
+	driver->vsdrv.out_quota_best = server->out_quota_best ?
+	    server->out_quota_best : driver->vsdrv.out_quota_min;
+	driver->vsdrv.in_notify_count = VSERVICE_SERIAL_NBIT_IN__COUNT;
+	driver->vsdrv.out_notify_count = VSERVICE_SERIAL_NBIT_OUT__COUNT;
+
+	driver->vsdrv.probe = serial_server_probe;
+	driver->vsdrv.remove = serial_server_remove;
+	driver->vsdrv.receive = serial_handle_message;
+	driver->vsdrv.notify = serial_handle_notify;
+	driver->vsdrv.start = server->tx_atomic ?
+	    serial_handle_start_bh : serial_handle_start;
+	driver->vsdrv.reset = server->tx_atomic ?
+	    serial_handle_reset_bh : serial_handle_reset;
+	driver->vsdrv.tx_ready = serial_handle_tx_ready;
+	driver->vsdrv.out_notify_count = 0;
+	driver->vsdrv.in_notify_count = 0;
+	driver->vsdrv.driver.name = name;
+	driver->vsdrv.driver.owner = owner;
+	driver->vsdrv.driver.bus = &vs_server_bus_type;
+
+	ret = driver_register(&driver->vsdrv.driver);
+
+	if (ret) {
+		goto fail_driver_register;
+	}
+
+	return 0;
+
+ fail_driver_register:
+	server->driver = NULL;
+	kfree(driver);
+ fail_alloc_driver:
+	return ret;
+}
+
+EXPORT_SYMBOL(__vservice_serial_server_register);
+
+int vservice_serial_server_unregister(struct vs_server_serial *server)
+{
+	struct vs_serial_server_driver *driver;
+
+	if (!server->driver)
+		return 0;
+
+	driver = to_server_driver(server->driver);
+	driver_unregister(&driver->vsdrv.driver);
+
+	server->driver = NULL;
+	kfree(driver);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vservice_serial_server_unregister);
+
+static int serial_server_probe(struct vs_service_device *service)
+{
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_serial *server = to_server_driver(vsdrv)->server;
+	struct vs_server_serial_state *state;
+
+	state = server->alloc(service);
+	if (!state)
+		return -ENOMEM;
+	else if (IS_ERR(state))
+		return PTR_ERR(state);
+
+	state->service = vs_get_service(service);
+	state->state = VSERVICE_SERIAL_PROTOCOL_RESET_STATE;
+
+	dev_set_drvdata(&service->dev, state);
+
+	return 0;
+}
+
+static int serial_server_remove(struct vs_service_device *service)
+{
+	struct vs_server_serial_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_serial *server = to_server_driver(vsdrv)->server;
+
+	state->released = true;
+	dev_set_drvdata(&service->dev, NULL);
+	server->release(state);
+
+	vs_put_service(service);
+
+	return 0;
+}
+
+static int serial_handle_tx_ready(struct vs_service_device *service)
+{
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_serial *server = to_server_driver(vsdrv)->server;
+	struct vs_server_serial_state *state = dev_get_drvdata(&service->dev);
+
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base))
+		return 0;
+
+	if (server->tx_ready)
+		server->tx_ready(state);
+
+	return 0;
+}
+
+static int
+vs_server_serial_send_ack_open(struct vs_server_serial_state *_state,
+			       gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 4UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_serial *_server =
+	    to_server_driver(vsdrv)->server;
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_SERIAL_BASE_ACK_OPEN;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_CLOSED__OPEN:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+	    _state->packet_size;
+	_state->serial.packet_size = _state->packet_size;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_serial_send_ack_open);
+static int
+vs_server_serial_send_nack_open(struct vs_server_serial_state *_state,
+				gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_serial *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_CLOSED__OPEN:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_SERIAL_BASE_NACK_OPEN;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_serial_send_nack_open);
+static int
+vs_server_serial_send_ack_close(struct vs_server_serial_state *_state,
+				gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_serial *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING__CLOSE:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_SERIAL_BASE_ACK_CLOSE;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_serial_send_ack_close);
+static int
+vs_server_serial_send_nack_close(struct vs_server_serial_state *_state,
+				 gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_serial *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING__CLOSE:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_SERIAL_BASE_NACK_CLOSE;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_serial_send_nack_close);
+static int
+vs_server_serial_send_ack_reopen(struct vs_server_serial_state *_state,
+				 gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_serial *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING__REOPEN:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_SERIAL_BASE_ACK_REOPEN;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.base.statenum = VSERVICE_BASE__RESET;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_serial_send_ack_reopen);
+static int
+vs_server_serial_send_nack_reopen(struct vs_server_serial_state *_state,
+				  gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_serial *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING__REOPEN:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_SERIAL_BASE_NACK_REOPEN;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_serial_send_nack_reopen);
+static int
+vs_server_serial_handle_req_open(const struct vs_server_serial *_server,
+				 struct vs_server_serial_state *_state,
+				 struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_CLOSED:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+	_state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED__OPEN;
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_server->open)
+		return vs_server_serial_open_complete(_state,
+						      _server->open(_state));
+	return vs_server_serial_open_complete(_state, VS_SERVER_RESP_SUCCESS);
+
+}
+
+int vs_server_serial_open_complete(struct vs_server_serial_state *_state,
+				   vs_server_response_type_t resp)
+{
+	int ret = 0;
+	if (resp == VS_SERVER_RESP_SUCCESS)
+		ret =
+		    vs_server_serial_send_ack_open(_state,
+						   vs_service_has_atomic_rx
+						   (VS_STATE_SERVICE_PTR
+						    (_state)) ? GFP_ATOMIC :
+						   GFP_KERNEL);
+	else if (resp == VS_SERVER_RESP_FAILURE)
+		ret =
+		    vs_server_serial_send_nack_open(_state,
+						    vs_service_has_atomic_rx
+						    (VS_STATE_SERVICE_PTR
+						     (_state)) ? GFP_ATOMIC :
+						    GFP_KERNEL);
+
+	return ret;
+
+}
+
+EXPORT_SYMBOL(vs_server_serial_open_complete);
+
+EXPORT_SYMBOL(vs_server_serial_handle_req_open);
+static int
+vs_server_serial_handle_req_close(const struct vs_server_serial *_server,
+				  struct vs_server_serial_state *_state,
+				  struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+	_state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING__CLOSE;
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_server->close)
+		return vs_server_serial_close_complete(_state,
+						       _server->close(_state));
+	return vs_server_serial_close_complete(_state, VS_SERVER_RESP_SUCCESS);
+
+}
+
+int vs_server_serial_close_complete(struct vs_server_serial_state *_state,
+				    vs_server_response_type_t resp)
+{
+	int ret = 0;
+	if (resp == VS_SERVER_RESP_SUCCESS)
+		ret =
+		    vs_server_serial_send_ack_close(_state,
+						    vs_service_has_atomic_rx
+						    (VS_STATE_SERVICE_PTR
+						     (_state)) ? GFP_ATOMIC :
+						    GFP_KERNEL);
+	else if (resp == VS_SERVER_RESP_FAILURE)
+		ret =
+		    vs_server_serial_send_nack_close(_state,
+						     vs_service_has_atomic_rx
+						     (VS_STATE_SERVICE_PTR
+						      (_state)) ? GFP_ATOMIC :
+						     GFP_KERNEL);
+	if ((resp == VS_SERVER_RESP_SUCCESS) && (ret == 0)) {
+		wake_up_all(&_state->service->quota_wq);
+	}
+	return ret;
+
+}
+
+EXPORT_SYMBOL(vs_server_serial_close_complete);
+
+EXPORT_SYMBOL(vs_server_serial_handle_req_close);
+static int
+vs_server_serial_handle_req_reopen(const struct vs_server_serial *_server,
+				   struct vs_server_serial_state *_state,
+				   struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+	_state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING__REOPEN;
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_server->reopen)
+		return vs_server_serial_reopen_complete(_state,
+							_server->
+							reopen(_state));
+	else
+		return vs_server_serial_send_nack_reopen(_state,
+							 vs_service_has_atomic_rx
+							 (VS_STATE_SERVICE_PTR
+							  (_state)) ? GFP_ATOMIC
+							 : GFP_KERNEL);
+
+}
+
+int vs_server_serial_reopen_complete(struct vs_server_serial_state *_state,
+				     vs_server_response_type_t resp)
+{
+	int ret = 0;
+	if (resp == VS_SERVER_RESP_SUCCESS) {
+		ret =
+		    vs_server_serial_send_ack_reopen(_state,
+						     vs_service_has_atomic_rx
+						     (VS_STATE_SERVICE_PTR
+						      (_state)) ? GFP_ATOMIC :
+						     GFP_KERNEL);
+	} else if (resp == VS_SERVER_RESP_FAILURE) {
+		ret =
+		    vs_server_serial_send_nack_reopen(_state,
+						      vs_service_has_atomic_rx
+						      (VS_STATE_SERVICE_PTR
+						       (_state)) ? GFP_ATOMIC :
+						      GFP_KERNEL);
+	}
+
+	return ret;
+
+}
+
+EXPORT_SYMBOL(vs_server_serial_reopen_complete);
+
+EXPORT_SYMBOL(vs_server_serial_handle_req_reopen);
+struct vs_mbuf *vs_server_serial_serial_alloc_msg(struct vs_server_serial_state
+						  *_state, struct vs_pbuf *b,
+						  gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+	const vs_message_id_t _msg_id = VSERVICE_SERIAL_SERIAL_MSG_MSG;
+	const uint32_t _msg_size =
+	    sizeof(vs_message_id_t) + _state->serial.packet_size + 4UL;
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return _mbuf;
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+		return ERR_PTR(-ENOMEM);
+	}
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) = _msg_id;
+
+	if (!b)
+		goto fail;
+	b->data =
+	    (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL +
+			   sizeof(uint32_t));
+	b->size = _state->serial.packet_size;
+	b->max_size = b->size;
+	return _mbuf;
+
+ fail:
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	return NULL;
+}
+
+EXPORT_SYMBOL(vs_server_serial_serial_alloc_msg);
+int vs_server_serial_serial_getbufs_msg(struct vs_server_serial_state *_state,
+					struct vs_pbuf *b,
+					struct vs_mbuf *_mbuf)
+{
+	const vs_message_id_t _msg_id = VSERVICE_SERIAL_SERIAL_MSG_MSG;
+	const size_t _max_size =
+	    sizeof(vs_message_id_t) + _state->serial.packet_size + 4UL;
+	const size_t _min_size = _max_size - _state->serial.packet_size;
+	size_t _exact_size;
+
+	if (*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) != _msg_id)
+		return -EINVAL;
+	if ((VS_MBUF_SIZE(_mbuf) > _max_size)
+	    || (VS_MBUF_SIZE(_mbuf) < _min_size))
+		return -EBADMSG;
+
+	b->size =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	b->data =
+	    (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL +
+			   sizeof(uint32_t));
+	b->max_size = b->size;
+
+	/* Now check the size received is the exact size expected */
+	_exact_size = _max_size - (_state->serial.packet_size - b->size);
+	if (VS_MBUF_SIZE(_mbuf) != _exact_size)
+		return -EBADMSG;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_serial_serial_getbufs_msg);
+int vs_server_serial_serial_free_msg(struct vs_server_serial_state *_state,
+				     struct vs_pbuf *b, struct vs_mbuf *_mbuf)
+{
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_serial_serial_free_msg);
+int
+vs_server_serial_serial_send_msg(struct vs_server_serial_state *_state,
+				 struct vs_pbuf b, struct vs_mbuf *_mbuf)
+{
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_serial *_server =
+	    to_server_driver(vsdrv)->server;
+	if (_state->state.base.statenum != VSERVICE_BASE_STATE_RUNNING)
+		return -EPROTO;
+	if (*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) !=
+	    VSERVICE_SERIAL_SERIAL_MSG_MSG)
+
+		return -EINVAL;
+
+	if ((b.size + sizeof(vs_message_id_t) + 0UL) > VS_MBUF_SIZE(_mbuf))
+		return -EINVAL;
+
+	if (b.size < b.max_size)
+		VS_MBUF_SIZE(_mbuf) -= (b.max_size - b.size);
+
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+	    b.size;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_serial_serial_send_msg);
+static int
+vs_server_serial_serial_handle_msg(const struct vs_server_serial *_server,
+				   struct vs_server_serial_state *_state,
+				   struct vs_mbuf *_mbuf)
+{
+	const size_t _max_size =
+	    sizeof(vs_message_id_t) + _state->serial.packet_size + 4UL;
+	struct vs_pbuf b;
+	const size_t _min_size = _max_size - _state->serial.packet_size;
+	size_t _exact_size;
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(_state->state.base))
+		return -EPROTO;
+
+	/* The first check is to ensure the message isn't complete garbage */
+	if ((VS_MBUF_SIZE(_mbuf) > _max_size)
+	    || (VS_MBUF_SIZE(_mbuf) < _min_size))
+		return -EBADMSG;
+
+	b.size =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	b.data =
+	    (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL +
+			   sizeof(uint32_t));
+	b.max_size = b.size;
+
+	/* Now check the size received is the exact size expected */
+	_exact_size = _max_size - (_state->serial.packet_size - b.size);
+	if (VS_MBUF_SIZE(_mbuf) != _exact_size)
+		return -EBADMSG;
+	if (_server->serial.msg_msg)
+		return _server->serial.msg_msg(_state, b, _mbuf);
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_serial_serial_handle_msg);
+static int
+serial_handle_message(struct vs_service_device *service, struct vs_mbuf *_mbuf)
+{
+	vs_message_id_t message_id;
+	__maybe_unused struct vs_server_serial_state *state =
+	    dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	__maybe_unused struct vs_server_serial *server =
+	    to_server_driver(vsdrv)->server;
+
+	int ret;
+
+	/* Extract the message ID */
+	if (VS_MBUF_SIZE(_mbuf) < sizeof(message_id)) {
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Invalid message size %zd\n",
+			__func__, __LINE__, VS_MBUF_SIZE(_mbuf));
+
+		return -EBADMSG;
+	}
+
+	message_id = *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf));
+
+	switch (message_id) {
+
+/** interface base **/
+/* command in sync open */
+	case VSERVICE_SERIAL_BASE_REQ_OPEN:
+		ret = vs_server_serial_handle_req_open(server, state, _mbuf);
+		break;
+
+/* command in sync close */
+	case VSERVICE_SERIAL_BASE_REQ_CLOSE:
+		ret = vs_server_serial_handle_req_close(server, state, _mbuf);
+		break;
+
+/* command in sync reopen */
+	case VSERVICE_SERIAL_BASE_REQ_REOPEN:
+		ret = vs_server_serial_handle_req_reopen(server, state, _mbuf);
+		break;
+
+/** interface serial **/
+/* message msg */
+	case VSERVICE_SERIAL_SERIAL_MSG_MSG:
+		ret = vs_server_serial_serial_handle_msg(server, state, _mbuf);
+		break;
+
+	default:
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Unknown message type %d\n",
+			__func__, __LINE__, (int)message_id);
+
+		ret = -EPROTO;
+		break;
+	}
+
+	if (ret) {
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Handler for message type %d returned %d\n",
+			__func__, __LINE__, (int)message_id, ret);
+
+	}
+
+	return ret;
+}
+
+static void serial_handle_notify(struct vs_service_device *service,
+				 uint32_t notify_bits)
+{
+	__maybe_unused struct vs_server_serial_state *state =
+	    dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	__maybe_unused struct vs_server_serial *server =
+	    to_server_driver(vsdrv)->server;
+
+	uint32_t bits = notify_bits;
+	int ret;
+
+	while (bits) {
+		uint32_t not = __ffs(bits);
+		switch (not) {
+
+    /** interface serial **/
+
+		default:
+			dev_err(&state->service->dev,
+				"[%s:%d] Protocol error: Unknown notification %d\n",
+				__func__, __LINE__, (int)not);
+
+			ret = -EPROTO;
+			break;
+
+		}
+		bits &= ~(1 << not);
+		if (ret) {
+			dev_err(&state->service->dev,
+				"[%s:%d] Protocol error: Handler for notification %d returned %d\n",
+				__func__, __LINE__, (int)not, ret);
+
+		}
+	}
+}
+
+MODULE_DESCRIPTION("OKL4 Virtual Services serial Server Protocol Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/session.c b/drivers/vservices/session.c
new file mode 100644
index 0000000..4048807
--- /dev/null
+++ b/drivers/vservices/session.c
@@ -0,0 +1,2843 @@
+/*
+ * drivers/vservices/session.c
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This is the generic session-management code for the vServices framework.
+ * It creates service and session devices on request from session and
+ * transport drivers, respectively; it also queues incoming messages from the
+ * transport and distributes them to the session's services.
+ */
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/notifier.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/kdev_t.h>
+#include <linux/err.h>
+
+#include <vservices/transport.h>
+#include <vservices/session.h>
+#include <vservices/service.h>
+
+#include "session.h"
+#include "transport.h"
+#include "compat.h"
+
+/* Minimum required time between resets to avoid throttling */
+#define RESET_THROTTLE_TIME msecs_to_jiffies(1000)
+
+/*
+ * Minimum/maximum reset throttling time. The reset throttle will start at
+ * the minimum and increase to the maximum exponentially.
+ */
+#define RESET_THROTTLE_MIN RESET_THROTTLE_TIME
+#define RESET_THROTTLE_MAX msecs_to_jiffies(8 * 1000)
+
+/*
+ * If the reset is being throttled and a sane reset (one that does not need
+ * throttling) is requested, throttling is disabled once the service's reset
+ * delay multiplied by this value has elapsed.
+ */
+#define RESET_THROTTLE_COOL_OFF_MULT 2
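+
+/*
+ * Illustrative example of the intent of the constants above (assuming the
+ * throttle delay doubles on each throttled reset): a service that keeps
+ * resetting would typically be delayed by roughly 1s, 2s, 4s and then be
+ * capped at RESET_THROTTLE_MAX (8s) between successive restarts.
+ */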
+
+/* IDR of session ids to sessions */
+static DEFINE_IDR(session_idr);
+DEFINE_MUTEX(vs_session_lock);
+EXPORT_SYMBOL_GPL(vs_session_lock);
+
+/* Notifier list for vService session events */
+static BLOCKING_NOTIFIER_HEAD(vs_session_notifier_list);
+
+static unsigned long default_debug_mask;
+module_param(default_debug_mask, ulong, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(default_debug_mask, "Default vServices debug mask");
+
+/* vServices root in sysfs at /sys/vservices */
+struct kobject *vservices_root;
+EXPORT_SYMBOL_GPL(vservices_root);
+
+/* vServices server root in sysfs at /sys/vservices/server-sessions */
+struct kobject *vservices_server_root;
+EXPORT_SYMBOL_GPL(vservices_server_root);
+
+/* vServices client root in sysfs at /sys/vservices/client-sessions */
+struct kobject *vservices_client_root;
+EXPORT_SYMBOL_GPL(vservices_client_root);
+
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+struct vs_service_device *vs_service_lookup_by_devt(dev_t dev)
+{
+	struct vs_session_device *session;
+	struct vs_service_device *service;
+
+	mutex_lock(&vs_session_lock);
+	session = idr_find(&session_idr, MINOR(dev) / VS_MAX_SERVICES);
+	get_device(&session->dev);
+	mutex_unlock(&vs_session_lock);
+
+	service = vs_session_get_service(session,
+			MINOR(dev) % VS_MAX_SERVICES);
+	put_device(&session->dev);
+
+	return service;
+}
+#endif
+
+struct vs_session_for_each_data {
+	int (*fn)(struct vs_session_device *session, void *data);
+	void *data;
+};
+
+int vs_session_for_each_from_idr(int id, void *session, void *_data)
+{
+	struct vs_session_for_each_data *data =
+		(struct vs_session_for_each_data *)_data;
+	return data->fn(session, data->data);
+}
+
+/**
+ * vs_session_for_each_locked - call a callback function for each session
+ * @fn: function to call
+ * @data: opaque pointer that is passed through to the function
+ */
+extern int vs_session_for_each_locked(
+		int (*fn)(struct vs_session_device *session, void *data),
+		void *data)
+{
+	struct vs_session_for_each_data priv = { .fn = fn, .data = data };
+
+	lockdep_assert_held(&vs_session_lock);
+
+	return idr_for_each(&session_idr, vs_session_for_each_from_idr,
+			&priv);
+}
+EXPORT_SYMBOL(vs_session_for_each_locked);
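+
+/*
+ * Illustrative usage (a sketch only; the callback and counter names are
+ * hypothetical): a caller wanting to count the registered sessions could do
+ * something like the following, holding vs_session_lock across the walk.
+ *
+ *	static int count_one_session(struct vs_session_device *session,
+ *			void *data)
+ *	{
+ *		(*(int *)data)++;
+ *		return 0;
+ *	}
+ *
+ *	int count = 0;
+ *
+ *	mutex_lock(&vs_session_lock);
+ *	vs_session_for_each_locked(count_one_session, &count);
+ *	mutex_unlock(&vs_session_lock);
+ */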
+
+/**
+ * vs_session_register_notify - register a notifier callback for vServices events
+ * @nb: pointer to the notifier block for the callback events.
+ */
+void vs_session_register_notify(struct notifier_block *nb)
+{
+	blocking_notifier_chain_register(&vs_session_notifier_list, nb);
+}
+EXPORT_SYMBOL(vs_session_register_notify);
+
+/**
+ * vs_session_unregister_notify - unregister a notifier callback for vServices events
+ * @nb: pointer to the notifier block for the callback events.
+ */
+void vs_session_unregister_notify(struct notifier_block *nb)
+{
+	blocking_notifier_chain_unregister(&vs_session_notifier_list, nb);
+}
+EXPORT_SYMBOL(vs_session_unregister_notify);
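+
+/*
+ * Illustrative usage (a sketch only; the callback name is hypothetical and
+ * the meaning of the event codes is defined by the vServices headers, not
+ * shown here): a module interested in session events registers a standard
+ * notifier block.
+ *
+ *	static int my_session_event(struct notifier_block *nb,
+ *			unsigned long event, void *data)
+ *	{
+ *		pr_debug("vservices session event %lu\n", event);
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	static struct notifier_block my_session_nb = {
+ *		.notifier_call = my_session_event,
+ *	};
+ *
+ *	vs_session_register_notify(&my_session_nb);
+ *	...
+ *	vs_session_unregister_notify(&my_session_nb);
+ */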
+
+/*
+ * Helper function that returns how long ago (in milliseconds) something
+ * happened. Marked as __maybe_unused since it is only needed when
+ * CONFIG_VSERVICES_DEBUG is enabled, but it cannot be removed outright
+ * because doing so would cause compile-time errors.
+ */
+static __maybe_unused unsigned msecs_ago(unsigned long jiffy_value)
+{
+	return jiffies_to_msecs(jiffies - jiffy_value);
+}
+
+static void session_fatal_error_work(struct work_struct *work)
+{
+	struct vs_session_device *session = container_of(work,
+			struct vs_session_device, fatal_error_work);
+
+	session->transport->vt->reset(session->transport);
+}
+
+static void session_fatal_error(struct vs_session_device *session, gfp_t gfp)
+{
+	schedule_work(&session->fatal_error_work);
+}
+
+/*
+ * Service readiness state machine
+ *
+ * The states are:
+ *
+ * INIT: Initial state. Service may not be completely configured yet
+ * (typically because the protocol hasn't been set); call vs_service_start
+ * once configuration is complete. The disable count must be nonzero, and
+ * must never reach zero in this state.
+ * DISABLED: Service is not permitted to communicate. Non-core services are
+ * in this state whenever the core protocol and/or transport state does not
+ * allow them to be active; core services are only in this state transiently.
+ * The disable count must be nonzero; when it reaches zero, the service
+ * transitions to RESET state.
+ * RESET: Service drivers are inactive at both ends, but the core service
+ * state allows the service to become active. The session will schedule a
+ * future transition to READY state when entering this state, but the
+ * transition may be delayed to throttle the rate at which resets occur.
+ * READY: All core-service and session-layer policy allows the service to
+ * communicate; it will become active as soon as it has a protocol driver.
+ * ACTIVE: The driver is present and communicating.
+ * LOCAL_RESET: We have initiated a reset at this end, but the remote end has
+ * not yet acknowledged it. We will enter the RESET state on receiving
+ * acknowledgement, unless the disable count is nonzero in which case we
+ * will enter DISABLED state.
+ * LOCAL_DELETE: As for LOCAL_RESET, but we will enter the DELETED state
+ * instead of RESET or DISABLED.
+ * DELETED: The service is no longer present on the session; the service
+ * device structure may still exist because something is holding a reference
+ * to it.
+ *
+ * The permitted transitions are:
+ *
+ * From          To            Trigger
+ * INIT          DISABLED      vs_service_start
+ * DISABLED      RESET         vs_service_enable (disable_count -> 0)
+ * RESET         READY         End of throttle delay (may be 0)
+ * READY         ACTIVE        Latter of probe() and entering READY
+ * {READY, ACTIVE}
+ *               LOCAL_RESET   vs_service_reset
+ * {READY, ACTIVE, LOCAL_RESET}
+ *               RESET         vs_service_handle_reset (server)
+ * RESET         DISABLED      vs_service_disable (server)
+ * {READY, ACTIVE, LOCAL_RESET}
+ *               DISABLED      vs_service_handle_reset (client)
+ * {INIT, RESET, READY, ACTIVE, LOCAL_RESET}
+ *               DISABLED      vs_service_disable_noncore
+ * {ACTIVE, LOCAL_RESET}
+ *               LOCAL_DELETE  vs_service_delete
+ * {INIT, DISABLED, RESET, READY}
+ *               DELETED       vs_service_delete
+ * LOCAL_DELETE  DELETED       vs_service_handle_reset
+ *                             vs_service_disable_noncore
+ *
+ * See the documentation for the triggers for details.
+ */
+
+enum vs_service_readiness {
+	VS_SERVICE_INIT,
+	VS_SERVICE_DISABLED,
+	VS_SERVICE_RESET,
+	VS_SERVICE_READY,
+	VS_SERVICE_ACTIVE,
+	VS_SERVICE_LOCAL_RESET,
+	VS_SERVICE_LOCAL_DELETE,
+	VS_SERVICE_DELETED,
+};
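+
+/*
+ * Illustrative lifecycle (one common path through the transition table
+ * above; not exhaustive): a service starts in INIT, moves to DISABLED via
+ * vs_service_start(), to RESET when its last disable count is dropped, to
+ * READY once any reset throttling delay has expired, and to ACTIVE once a
+ * protocol driver has been probed. A locally initiated vs_service_reset()
+ * then takes it to LOCAL_RESET and, when the partner acknowledges, back to
+ * RESET (or to DISABLED if the disable count is nonzero).
+ */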
+
+/* Session activation states. */
+enum {
+	VS_SESSION_RESET,
+	VS_SESSION_ACTIVATE,
+	VS_SESSION_ACTIVE,
+};
+
+/**
+ * vs_service_start - Start a service by moving it from the init state to the
+ * disabled state.
+ *
+ * @service: The service to start.
+ *
+ * Returns true if the service was started, or false if it was not.
+ */
+bool vs_service_start(struct vs_service_device *service)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+	struct vs_session_driver *session_drv =
+			to_vs_session_driver(session->dev.driver);
+
+	WARN_ON(!service->protocol);
+
+	mutex_lock_nested(&service->ready_lock, service->lock_subclass);
+
+	if (service->readiness != VS_SERVICE_INIT) {
+		if (service->readiness != VS_SERVICE_DELETED)
+			dev_err(&service->dev,
+					"start called from invalid state %d\n",
+					service->readiness);
+		mutex_unlock(&service->ready_lock);
+		return false;
+	}
+
+	if (service->id != 0 && session_drv->service_added) {
+		int err = session_drv->service_added(session, service);
+		if (err < 0) {
+			dev_err(&session->dev, "Failed to add service %d: %d\n",
+					service->id, err);
+			mutex_unlock(&service->ready_lock);
+			return false;
+		}
+	}
+
+	service->readiness = VS_SERVICE_DISABLED;
+	service->disable_count = 1;
+	service->last_reset_request = jiffies;
+
+	mutex_unlock(&service->ready_lock);
+
+	/* Tell userspace about the service. */
+	dev_set_uevent_suppress(&service->dev, false);
+	kobject_uevent(&service->dev.kobj, KOBJ_ADD);
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(vs_service_start);
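+
+/*
+ * Illustrative usage (a sketch, not taken from a real caller): whoever
+ * creates a service is expected to finish configuring it - in particular
+ * setting service->protocol, which the WARN_ON above checks - and then call
+ * vs_service_start() to move it out of the INIT state:
+ *
+ *	if (!vs_service_start(service))
+ *		dev_err(&service->dev, "failed to start service\n");
+ */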
+
+static void cancel_pending_rx(struct vs_service_device *service);
+static void queue_ready_work(struct vs_service_device *service);
+
+static void __try_start_service(struct vs_service_device *service)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+	struct vs_session_driver *session_drv =
+			to_vs_session_driver(session->dev.driver);
+	struct vs_transport *transport;
+	int err;
+	struct vs_service_driver *driver;
+
+	lockdep_assert_held(&service->ready_lock);
+
+	/* We can't start if the service is not ready yet. */
+	if (service->readiness != VS_SERVICE_READY)
+		return;
+
+	/*
+	 * There should never be anything in the RX queue at this point.
+	 * If there is, it can seriously confuse the service drivers for
+	 * no obvious reason, so we check.
+	 */
+	if (WARN_ON(!list_empty(&service->rx_queue)))
+		cancel_pending_rx(service);
+
+	if (!service->driver_probed) {
+		vs_dev_debug(VS_DEBUG_SESSION, session, &service->dev,
+				"ready with no driver\n");
+		return;
+	}
+
+	/* Prepare the transport to support the service. */
+	transport = session->transport;
+	err = transport->vt->service_start(transport, service);
+
+	if (err < 0) {
+		/* fatal error attempting to start; reset and try again */
+		service->readiness = VS_SERVICE_RESET;
+		service->last_reset_request = jiffies;
+		service->last_reset = jiffies;
+		queue_ready_work(service);
+
+		return;
+	}
+
+	service->readiness = VS_SERVICE_ACTIVE;
+
+	driver = to_vs_service_driver(service->dev.driver);
+	if (driver->start)
+		driver->start(service);
+
+	if (service->id && session_drv->service_start) {
+		err = session_drv->service_start(session, service);
+		if (err < 0) {
+			dev_err(&session->dev, "Failed to start service %s (%d): %d\n",
+					dev_name(&service->dev),
+					service->id, err);
+			session_fatal_error(session, GFP_KERNEL);
+		}
+	}
+}
+
+static void try_start_service(struct vs_service_device *service)
+{
+	mutex_lock_nested(&service->ready_lock, service->lock_subclass);
+
+	__try_start_service(service);
+
+	mutex_unlock(&service->ready_lock);
+}
+
+static void service_ready_work(struct work_struct *work)
+{
+	struct vs_service_device *service = container_of(work,
+			struct vs_service_device, ready_work.work);
+	struct vs_session_device *session = vs_service_get_session(service);
+
+	vs_dev_debug(VS_DEBUG_SESSION, session, &service->dev,
+			"ready work - last reset request was %u ms ago\n",
+			msecs_ago(service->last_reset_request));
+
+	/*
+	 * Make sure there's no reset work pending from an earlier driver
+	 * failure. We should already be inactive at this point, so it's safe
+	 * to just cancel it.
+	 */
+	cancel_work_sync(&service->reset_work);
+
+	mutex_lock_nested(&service->ready_lock, service->lock_subclass);
+
+	if (service->readiness != VS_SERVICE_RESET) {
+		vs_dev_debug(VS_DEBUG_SESSION, session, &service->dev,
+				"ready work found readiness of %d, doing nothing\n",
+				service->readiness);
+		mutex_unlock(&service->ready_lock);
+		return;
+	}
+
+	service->readiness = VS_SERVICE_READY;
+	/* Record the time at which this happened, for throttling. */
+	service->last_ready = jiffies;
+
+	/* Tell userspace that the service is ready. */
+	kobject_uevent(&service->dev.kobj, KOBJ_ONLINE);
+
+	/* Start the service, if it has a driver attached. */
+	__try_start_service(service);
+
+	mutex_unlock(&service->ready_lock);
+}
+
+static int __enable_service(struct vs_service_device *service);
+
+/**
+ * __reset_service - make a service inactive, and tell its driver, the
+ * transport, and possibly the remote partner
+ * @service:       The service to reset
+ * @notify_remote: If true, the partner is notified of the reset
+ *
+ * This routine is called to make an active service inactive. If the given
+ * service is currently active, it drops any queued messages for the service,
+ * and then informs the service driver and the transport layer that the
+ * service has reset. It sets the service readiness to VS_SERVICE_LOCAL_RESET
+ * to indicate that the driver is no longer active.
+ *
+ * This routine has no effect on services that are not active.
+ *
+ * The caller must hold the target service's ready lock.
+ */
+static void __reset_service(struct vs_service_device *service,
+		bool notify_remote)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+	struct vs_session_driver *session_drv =
+			to_vs_session_driver(session->dev.driver);
+	struct vs_service_driver *driver = NULL;
+	struct vs_transport *transport;
+	int err;
+
+	lockdep_assert_held(&service->ready_lock);
+
+	/* If we're already inactive, there's nothing to do. */
+	if (service->readiness != VS_SERVICE_ACTIVE)
+		return;
+
+	service->last_reset = jiffies;
+	service->readiness = VS_SERVICE_LOCAL_RESET;
+
+	cancel_pending_rx(service);
+
+	if (!WARN_ON(!service->driver_probed))
+		driver = to_vs_service_driver(service->dev.driver);
+
+	if (driver && driver->reset)
+		driver->reset(service);
+
+	wake_up_all(&service->quota_wq);
+
+	transport = vs_service_get_session(service)->transport;
+
+	/*
+	 * Ask the transport to reset the service. If this returns a positive
+	 * value, we need to leave the service disabled, and the transport
+	 * will re-enable it. To avoid allowing the disable count to go
+	 * negative if that re-enable races with this callback returning, we
+	 * disable the service beforehand and re-enable it if the callback
+	 * returns zero.
+	 */
+	service->disable_count++;
+	err = transport->vt->service_reset(transport, service);
+	if (err < 0) {
+		dev_err(&session->dev, "Failed to reset service %d: %d (transport)\n",
+				service->id, err);
+		session_fatal_error(session, GFP_KERNEL);
+	} else if (!err) {
+		err = __enable_service(service);
+	}
+
+	if (notify_remote) {
+		if (service->id) {
+			err = session_drv->service_local_reset(session,
+					service);
+			if (err == VS_SERVICE_ALREADY_RESET) {
+				service->readiness = VS_SERVICE_RESET;
+				service->last_reset = jiffies;
+				queue_ready_work(service);
+
+			} else if (err < 0) {
+				dev_err(&session->dev, "Failed to reset service %d: %d (session)\n",
+						service->id, err);
+				session_fatal_error(session, GFP_KERNEL);
+			}
+		} else {
+			session->transport->vt->reset(session->transport);
+		}
+	}
+
+	/* Tell userspace that the service is no longer active. */
+	kobject_uevent(&service->dev.kobj, KOBJ_OFFLINE);
+}
+
+/**
+ * reset_service - reset a service and inform the remote partner
+ * @service: The service to reset
+ *
+ * This routine is called when a reset is locally initiated (other than
+ * implicitly by a session / core service reset). It bumps the reset request
+ * timestamp, acquires the necessary locks, and calls __reset_service.
+ *
+ * This routine returns with the service ready lock held, to allow the caller
+ * to make any other state changes that must be atomic with the service
+ * reset.
+ */
+static void reset_service(struct vs_service_device *service)
+	__acquires(service->ready_lock)
+{
+	service->last_reset_request = jiffies;
+
+	mutex_lock_nested(&service->ready_lock, service->lock_subclass);
+
+	__reset_service(service, true);
+}
+
+/**
+ * vs_service_reset - initiate a service reset
+ * @service: the service that is to be reset
+ * @caller: the service that is initiating the reset
+ *
+ * This routine informs the partner that the given service is being reset,
+ * then disables and flushes the service's receive queues and resets its
+ * driver. The service will be automatically re-enabled once the partner has
+ * acknowledged the reset (see vs_session_handle_service_reset, above).
+ *
+ * If the given service is the core service, this will perform a transport
+ * reset, which implicitly resets (on the server side) or destroys (on
+ * the client side) every other service on the session.
+ *
+ * If the given service is already being reset, this has no effect, other
+ * than to delay completion of the reset if it is being throttled.
+ *
+ * For lock safety reasons, a service can only be directly reset by itself,
+ * the core service, or the service that created it (which is typically also
+ * the core service).
+ *
+ * A service that wishes to reset itself must not do so while holding its state
+ * lock or while running on its own workqueue. In these circumstances, call
+ * vs_service_reset_nosync() instead. Note that returning an error code
+ * (any negative number) from a driver callback forces a call to
+ * vs_service_reset_nosync() and prints an error message.
+ */
+int vs_service_reset(struct vs_service_device *service,
+		struct vs_service_device *caller)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+
+	if (caller != service && caller != service->owner) {
+		struct vs_service_device *core_service = session->core_service;
+
+		WARN_ON(!core_service);
+		if (caller != core_service)
+			return -EPERM;
+	}
+
+	reset_service(service);
+	/* reset_service returns with ready_lock held, but we don't need it */
+	mutex_unlock(&service->ready_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vs_service_reset);
+
+/**
+ * vs_service_reset_nosync - asynchronously reset a service.
+ * @service: the service that is to be reset
+ *
+ * This routine triggers a reset for the nominated service. It may be called
+ * from any context, including interrupt context. It does not wait for the
+ * reset to occur, and provides no synchronisation guarantees when called from
+ * outside the target service.
+ *
+ * This is intended only for service drivers that need to reset themselves
+ * from a context that would not normally allow it. In other cases, use
+ * vs_service_reset.
+ */
+void vs_service_reset_nosync(struct vs_service_device *service)
+{
+	service->pending_reset = true;
+	schedule_work(&service->reset_work);
+}
+EXPORT_SYMBOL_GPL(vs_service_reset_nosync);
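+
+/*
+ * Illustrative sketch of choosing between the two reset entry points from a
+ * service driver (the surrounding driver context is hypothetical): from
+ * ordinary process context, a driver resetting itself calls
+ *
+ *	vs_service_reset(service, service);
+ *
+ * whereas from its own workqueue, from atomic context, or while holding its
+ * own state lock it must instead use
+ *
+ *	vs_service_reset_nosync(service);
+ */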
+
+static void
+vs_service_remove_sysfs_entries(struct vs_session_device *session,
+		struct vs_service_device *service)
+{
+	sysfs_remove_link(session->sysfs_entry, service->sysfs_name);
+	sysfs_remove_link(&service->dev.kobj, VS_SESSION_SYMLINK_NAME);
+}
+
+static void vs_session_release_service_id(struct vs_service_device *service)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+
+	mutex_lock(&session->service_idr_lock);
+	idr_remove(&session->service_idr, service->id);
+	mutex_unlock(&session->service_idr_lock);
+	vs_dev_debug(VS_DEBUG_SESSION, session, &service->dev,
+			"service id deallocated\n");
+}
+
+static void destroy_service(struct vs_service_device *service,
+		bool notify_remote)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+	struct vs_session_driver *session_drv =
+			to_vs_session_driver(session->dev.driver);
+	struct vs_service_device *core_service __maybe_unused =
+			session->core_service;
+	int err;
+
+	lockdep_assert_held(&service->ready_lock);
+	WARN_ON(service->readiness != VS_SERVICE_DELETED);
+
+	/* Notify the core service and transport that the service is gone */
+	session->transport->vt->service_remove(session->transport, service);
+	if (notify_remote && service->id && session_drv->service_removed) {
+		err = session_drv->service_removed(session, service);
+		if (err < 0) {
+			dev_err(&session->dev,
+					"Failed to remove service %d: %d\n",
+					service->id, err);
+			session_fatal_error(session, GFP_KERNEL);
+		}
+	}
+
+	/*
+	 * At this point the service is guaranteed to be gone on the client
+	 * side, so we can safely release the service ID.
+	 */
+	if (session->is_server)
+		vs_session_release_service_id(service);
+
+	/*
+	 * This guarantees that any concurrent vs_session_get_service() that
+	 * found the service before we removed it from the IDR will take a
+	 * reference before we release ours.
+	 *
+	 * This similarly protects for_each_[usable_]service().
+	 */
+	synchronize_rcu();
+
+	/* Matches device_initialize() in vs_service_register() */
+	put_device(&service->dev);
+}
+
+/**
+ * disable_service - prevent a service becoming ready
+ * @service: the service that is to be disabled
+ * @force: true if the service is known to be in reset
+ *
+ * This routine may be called for any inactive service. Once disabled, the
+ * service cannot be made ready by the session, and thus cannot become active,
+ * until vs_service_enable() is called for it. If multiple calls are made to
+ * this function, they must be balanced by vs_service_enable() calls.
+ *
+ * If the force option is true, then any pending unacknowledged reset will be
+ * presumed to have been acknowledged. This is used when the core service is
+ * entering reset.
+ *
+ * This is used by the core service client to prevent the service restarting
+ * until the server is ready (i.e., a server_ready message is received); by
+ * the session layer to stop all communication while the core service itself
+ * is in reset; and by the transport layer when the transport was unable to
+ * complete reset of a service in its reset callback (typically because
+ * a service had passed message buffers to another Linux subsystem and could
+ * not free them immediately).
+ *
+ * In any case, there is no need for the operation to be signalled in any
+ * way, because the service is already in reset. It simply delays future
+ * signalling of service readiness.
+ */
+static void disable_service(struct vs_service_device *service, bool force)
+{
+	lockdep_assert_held(&service->ready_lock);
+
+	switch (service->readiness) {
+	case VS_SERVICE_INIT:
+	case VS_SERVICE_DELETED:
+	case VS_SERVICE_LOCAL_DELETE:
+		dev_err(&service->dev, "disabled while uninitialised\n");
+		break;
+	case VS_SERVICE_ACTIVE:
+		dev_err(&service->dev, "disabled while active\n");
+		break;
+	case VS_SERVICE_LOCAL_RESET:
+		/*
+		 * Will go to DISABLED state when reset completes, unless
+		 * it's being forced (i.e. we're moving to a core protocol
+		 * state that implies everything else is reset).
+		 */
+		if (force)
+			service->readiness = VS_SERVICE_DISABLED;
+		service->disable_count++;
+		break;
+	default:
+		service->readiness = VS_SERVICE_DISABLED;
+		service->disable_count++;
+		break;
+	}
+
+	cancel_delayed_work(&service->ready_work);
+}
+
+static int service_handle_reset(struct vs_session_device *session,
+		struct vs_service_device *target, bool disable)
+{
+	struct vs_session_driver *session_drv =
+			to_vs_session_driver(session->dev.driver);
+	int err = 0;
+
+	mutex_lock_nested(&target->ready_lock, target->lock_subclass);
+
+	switch (target->readiness) {
+	case VS_SERVICE_LOCAL_DELETE:
+		target->readiness = VS_SERVICE_DELETED;
+		destroy_service(target, true);
+		break;
+	case VS_SERVICE_ACTIVE:
+		/*
+		 * Reset the service and send a reset notification.
+		 *
+		 * We only send notifications for non-core services. This is
+		 * because core notifies by sending a transport reset, which
+		 * is what brought us here in the first place. Note that we
+		 * must already hold the core service state lock iff the
+		 * target is non-core.
+		 */
+		target->last_reset_request = jiffies;
+		__reset_service(target, target->id != 0);
+		/* fall through */
+	case VS_SERVICE_LOCAL_RESET:
+		target->readiness = target->disable_count ?
+			VS_SERVICE_DISABLED : VS_SERVICE_RESET;
+		if (disable)
+			disable_service(target, false);
+		if (target->readiness != VS_SERVICE_DISABLED)
+			queue_ready_work(target);
+		break;
+	case VS_SERVICE_READY:
+		/* Tell userspace that the service is no longer ready. */
+		kobject_uevent(&target->dev.kobj, KOBJ_OFFLINE);
+		/* fall through */
+	case VS_SERVICE_RESET:
+		/*
+		 * This can happen for a non-core service if we get a reset
+		 * request from the server on the client side, after the
+		 * client has enabled the service but before it is active.
+		 * Note that the service is already active on the server side
+		 * at this point. The client's delay may be due to either
+		 * reset throttling or the absence of a driver.
+		 *
+		 * We bump the reset request timestamp, disable the service
+		 * again, and send back an acknowledgement.
+		 */
+		if (disable && target->id) {
+			target->last_reset_request = jiffies;
+
+			err = session_drv->service_local_reset(
+					session, target);
+			if (err < 0) {
+				dev_err(&session->dev,
+						"Failed to reset service %d; %d\n",
+						target->id, err);
+				session_fatal_error(session,
+						GFP_KERNEL);
+			}
+
+			disable_service(target, false);
+			break;
+		}
+		/* fall through */
+	case VS_SERVICE_DISABLED:
+		/*
+		 * This can happen for the core service if we get a reset
+		 * before the transport has activated, or before the core
+		 * service has become ready.
+		 *
+		 * We bump the reset request timestamp, and disable the
+		 * service again if the transport had already activated and
+		 * enabled it.
+		 */
+		if (disable && !target->id) {
+			target->last_reset_request = jiffies;
+
+			if (target->readiness != VS_SERVICE_DISABLED)
+				disable_service(target, false);
+
+			break;
+		}
+		/* fall through */
+	default:
+		dev_warn(&target->dev, "remote reset while inactive (%d)\n",
+				target->readiness);
+		err = -EPROTO;
+		break;
+	}
+
+	mutex_unlock(&target->ready_lock);
+	return err;
+}
+
+/**
+ * vs_service_handle_reset - handle an incoming notification of a reset
+ * @session: the session that owns the service
+ * @service_id: the ID of the service that is to be reset
+ * @disable: if true, the service will not be automatically re-enabled
+ *
+ * This routine is called by the core service when the remote end notifies us
+ * of a non-core service reset. The service must be in ACTIVE, LOCAL_RESET or
+ * LOCAL_DELETE state. It must be called with the core service's state lock
+ * held.
+ *
+ * If the service was in ACTIVE state, the core service is called back to send
+ * a notification to the other end. If it was in LOCAL_DELETE state, it is
+ * unregistered.
+ */
+int vs_service_handle_reset(struct vs_session_device *session,
+		vs_service_id_t service_id, bool disable)
+{
+	struct vs_service_device *target;
+	int ret;
+
+	if (!service_id)
+		return -EINVAL;
+
+	target = vs_session_get_service(session, service_id);
+	if (!target)
+		return -ENODEV;
+
+	ret = service_handle_reset(session, target, disable);
+	vs_put_service(target);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(vs_service_handle_reset);
+
+static int __enable_service(struct vs_service_device *service)
+{
+	if (WARN_ON(!service->disable_count))
+		return -EINVAL;
+
+	if (--service->disable_count > 0)
+		return 0;
+
+	/*
+	 * If the service is still resetting, it can't become ready until the
+	 * reset completes. If it has been deleted, it will never become
+	 * ready. In either case, there's nothing more to do.
+	 */
+	if ((service->readiness == VS_SERVICE_LOCAL_RESET) ||
+			(service->readiness == VS_SERVICE_LOCAL_DELETE) ||
+			(service->readiness == VS_SERVICE_DELETED))
+		return 0;
+
+	if (WARN_ON(service->readiness != VS_SERVICE_DISABLED))
+		return -EINVAL;
+
+	service->readiness = VS_SERVICE_RESET;
+	service->last_reset = jiffies;
+	queue_ready_work(service);
+
+	return 0;
+}
+
+/**
+ * vs_service_enable - allow a service to become ready
+ * @service: the service that is to be enabled
+ *
+ * Calling this routine for a service permits the session layer to make the
+ * service ready. It will do so as soon as any outstanding reset throttling
+ * is complete, and will then start the service once it has a driver attached.
+ *
+ * Services are disabled, requiring a call to this routine to re-enable them:
+ * - when first initialised (after vs_service_start),
+ * - when reset on the client side by vs_service_handle_reset,
+ * - when the transport has delayed completion of a reset, and
+ * - when the server-side core protocol is disconnected or reset by
+ *   vs_session_disable_noncore.
+ */
+int vs_service_enable(struct vs_service_device *service)
+{
+	int ret;
+
+	mutex_lock_nested(&service->ready_lock, service->lock_subclass);
+
+	ret = __enable_service(service);
+
+	mutex_unlock(&service->ready_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(vs_service_enable);
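+
+/*
+ * Minimal sketch of balancing a disable, assuming a hypothetical transport
+ * that deferred completion of a service reset (see disable_service()) and
+ * later finishes releasing the service's buffers (foo_reset_complete is an
+ * illustrative name only):
+ *
+ *	static void foo_reset_complete(struct vs_service_device *service)
+ *	{
+ *		vs_service_enable(service);
+ *	}
+ *
+ * Each such call balances exactly one earlier disable of the service.
+ */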
+
+/*
+ * Service work functions
+ */
+static void queue_rx_work(struct vs_service_device *service)
+{
+	bool rx_atomic;
+
+	rx_atomic = vs_service_has_atomic_rx(service);
+	vs_dev_debug(VS_DEBUG_SESSION, vs_service_get_session(service),
+			&service->dev, "Queuing rx %s\n",
+			rx_atomic ? "tasklet (atomic)" : "work (cansleep)");
+
+	if (rx_atomic)
+		tasklet_schedule(&service->rx_tasklet);
+	else
+		queue_work(service->work_queue, &service->rx_work);
+}
+
+static void cancel_pending_rx(struct vs_service_device *service)
+{
+	struct vs_mbuf *mbuf;
+
+	lockdep_assert_held(&service->ready_lock);
+
+	cancel_work_sync(&service->rx_work);
+	tasklet_kill(&service->rx_tasklet);
+
+	spin_lock_irq(&service->rx_lock);
+	while (!list_empty(&service->rx_queue)) {
+		mbuf = list_first_entry(&service->rx_queue,
+				struct vs_mbuf, queue);
+		list_del_init(&mbuf->queue);
+		spin_unlock_irq(&service->rx_lock);
+		vs_service_free_mbuf(service, mbuf);
+		spin_lock_irq(&service->rx_lock);
+	}
+	service->tx_ready = false;
+	spin_unlock_irq(&service->rx_lock);
+}
+
+static bool reset_throttle_cooled_off(struct vs_service_device *service);
+static unsigned long reset_cool_off(struct vs_service_device *service);
+
+static void service_cooloff_work(struct work_struct *work)
+{
+	struct vs_service_device *service = container_of(work,
+			struct vs_service_device, cooloff_work.work);
+	struct vs_session_device *session = vs_service_get_session(service);
+	unsigned long current_time = jiffies, wake_time;
+
+	mutex_lock_nested(&service->ready_lock, service->lock_subclass);
+
+	if (reset_throttle_cooled_off(service)) {
+		vs_debug(VS_DEBUG_SESSION, session,
+				"Reset thrashing cooled off (delay = %u ms, cool off = %u ms, last reset %u ms ago, last reset request was %u ms ago)\n",
+				jiffies_to_msecs(service->reset_delay),
+				jiffies_to_msecs(reset_cool_off(service)),
+				msecs_ago(service->last_reset),
+				msecs_ago(service->last_reset_request));
+
+		service->reset_delay = 0;
+
+		/*
+		 * If the service is already in reset, then queue_ready_work
+		 * has already run and has deferred queuing of the ready_work
+		 * until cooloff. Schedule the ready work to run immediately.
+		 */
+		if (service->readiness == VS_SERVICE_RESET)
+			schedule_delayed_work(&service->ready_work, 0);
+	} else {
+		/*
+		 * This can happen if last_reset_request has been bumped
+		 * since the cooloff work was first queued. We need to
+		 * work out how long it is until the service cools off,
+		 * then reschedule ourselves.
+		 */
+		wake_time = reset_cool_off(service) +
+				service->last_reset_request;
+
+		WARN_ON(time_after(current_time, wake_time));
+
+		schedule_delayed_work(&service->cooloff_work,
+				wake_time - current_time);
+	}
+
+	mutex_unlock(&service->ready_lock);
+}
+
+static void
+service_reset_work(struct work_struct *work)
+{
+	struct vs_service_device *service = container_of(work,
+			struct vs_service_device, reset_work);
+
+	service->pending_reset = false;
+
+	vs_service_reset(service, service);
+}
+
+/* Returns true if there are more messages to handle */
+static bool
+dequeue_and_handle_received_message(struct vs_service_device *service)
+{
+	struct vs_service_driver *driver =
+			to_vs_service_driver(service->dev.driver);
+	struct vs_session_device *session = vs_service_get_session(service);
+	const struct vs_transport_vtable *vt = session->transport->vt;
+	struct vs_service_stats *stats = &service->stats;
+	struct vs_mbuf *mbuf;
+	size_t size;
+	int ret;
+
+	/* Don't do rx work unless the service is active */
+	if (service->readiness != VS_SERVICE_ACTIVE)
+		return false;
+
+	/* Atomically take an item from the queue */
+	spin_lock_irq(&service->rx_lock);
+	if (!list_empty(&service->rx_queue)) {
+		mbuf = list_first_entry(&service->rx_queue, struct vs_mbuf,
+				queue);
+		list_del_init(&mbuf->queue);
+		spin_unlock_irq(&service->rx_lock);
+		size = vt->mbuf_size(mbuf);
+
+		/*
+		 * Call the message handler for the service. The service's
+		 * message handler is responsible for freeing the mbuf when it
+		 * is done with it.
+		 */
+		ret = driver->receive(service, mbuf);
+		if (ret < 0) {
+			atomic_inc(&service->stats.recv_failures);
+			dev_err(&service->dev,
+					"receive returned %d; resetting service\n",
+					ret);
+			vs_service_reset_nosync(service);
+			return false;
+		} else {
+			atomic_add(size, &service->stats.recv_bytes);
+			atomic_inc(&service->stats.recv_mbufs);
+		}
+
+	} else if (service->tx_ready) {
+		service->tx_ready = false;
+		spin_unlock_irq(&service->rx_lock);
+
+		/*
+		 * Update the tx_ready stats accounting and then call the
+		 * service's tx_ready handler.
+		 */
+		atomic_inc(&stats->nr_tx_ready);
+		if (atomic_read(&stats->nr_over_quota) > 0) {
+			int total;
+
+			total = atomic_add_return(jiffies_to_msecs(jiffies -
+							stats->over_quota_time),
+					&stats->over_quota_time_total);
+			atomic_set(&stats->over_quota_time_avg, total /
+					atomic_read(&stats->nr_over_quota));
+		}
+		atomic_set(&service->is_over_quota, 0);
+
+		/*
+		 * Note that a service's quota may reduce at any point, even
+		 * during the tx_ready handler. This is important if a service
+		 * has an ordered list of pending messages to send. If a
+		 * message fails to send from the tx_ready handler due to
+		 * over-quota then subsequent messages in the same handler may
+		 * send successfully. To avoid sending messages in the
+		 * incorrect order the service's tx_ready handler should
+		 * return immediately if a message fails to send.
+		 */
+		ret = driver->tx_ready(service);
+		if (ret < 0) {
+			dev_err(&service->dev,
+					"tx_ready returned %d; resetting service\n",
+					ret);
+			vs_service_reset_nosync(service);
+			return false;
+		}
+	} else {
+		spin_unlock_irq(&service->rx_lock);
+	}
+
+	/*
+	 * There's no need to lock for this list_empty: if we race
+	 * with a msg enqueue, we'll be rescheduled by the other side,
+	 * and if we race with a dequeue, we'll just do nothing when
+	 * we run (or will be cancelled before we run).
+	 */
+	return !list_empty(&service->rx_queue) || service->tx_ready;
+}
+
+static void service_rx_tasklet(unsigned long data)
+{
+	struct vs_service_device *service = (struct vs_service_device *)data;
+	bool resched;
+
+	/*
+	 * There is no need to acquire the state spinlock or mutex here,
+	 * because this tasklet is disabled when the lock is held. These
+	 * are annotations for sparse and lockdep, respectively.
+	 *
+	 * We can't annotate the implicit mutex acquire because lockdep gets
+	 * upset about inconsistent softirq states.
+	 */
+	__acquire(service);
+	spin_acquire(&service->state_spinlock.dep_map, 0, 0, _THIS_IP_);
+
+	resched = dequeue_and_handle_received_message(service);
+
+	if (resched)
+		tasklet_schedule(&service->rx_tasklet);
+
+	spin_release(&service->state_spinlock.dep_map, 0, _THIS_IP_);
+	__release(service);
+}
+
+static void service_rx_work(struct work_struct *work)
+{
+	struct vs_service_device *service = container_of(work,
+			struct vs_service_device, rx_work);
+	bool requeue;
+
+	/*
+	 * We must acquire the state mutex here to protect services that
+	 * are using vs_service_state_lock().
+	 *
+	 * There is no need to acquire the spinlock, which is never used in
+	 * drivers with task context receive handlers.
+	 */
+	vs_service_state_lock(service);
+
+	requeue = dequeue_and_handle_received_message(service);
+
+	vs_service_state_unlock(service);
+
+	if (requeue)
+		queue_work(service->work_queue, work);
+}
+
+/*
+ * Service sysfs statistics counters. These files are all atomic_t, and
+ * read only, so we use a generator macro to avoid code duplication.
+ */
+#define service_stat_attr(__name)						\
+	static ssize_t __name##_show(struct device *dev,	\
+			struct device_attribute *attr, char *buf)	\
+	{													\
+		struct vs_service_device *service =				\
+				to_vs_service_device(dev);				\
+														\
+		return scnprintf(buf, PAGE_SIZE, "%u\n",		\
+				atomic_read(&service->stats.__name));	\
+	}													\
+	static DEVICE_ATTR_RO(__name)
+
+service_stat_attr(sent_mbufs);
+service_stat_attr(sent_bytes);
+service_stat_attr(recv_mbufs);
+service_stat_attr(recv_bytes);
+service_stat_attr(nr_over_quota);
+service_stat_attr(nr_tx_ready);
+service_stat_attr(over_quota_time_total);
+service_stat_attr(over_quota_time_avg);
+
+static struct attribute *service_stat_dev_attrs[] = {
+	&dev_attr_sent_mbufs.attr,
+	&dev_attr_sent_bytes.attr,
+	&dev_attr_recv_mbufs.attr,
+	&dev_attr_recv_bytes.attr,
+	&dev_attr_nr_over_quota.attr,
+	&dev_attr_nr_tx_ready.attr,
+	&dev_attr_over_quota_time_total.attr,
+	&dev_attr_over_quota_time_avg.attr,
+	NULL,
+};
+
+static const struct attribute_group service_stat_attributes = {
+	.name   = "stats",
+	.attrs  = service_stat_dev_attrs,
+};
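+
+/*
+ * Because the attribute group above is named "stats", each counter appears
+ * as a read-only file in a stats/ subdirectory of the service device's sysfs
+ * node, e.g. .../<service device>/stats/sent_mbufs. The parent path depends
+ * on the service bus and is shown here for illustration only.
+ */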
+
+static void delete_service(struct vs_service_device *service)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+	bool notify_on_destroy = true;
+
+	/* FIXME: Jira ticket SDK-3495 - philipd. */
+	/* This should be the caller's responsibility */
+	vs_get_service(service);
+
+	mutex_lock_nested(&service->ready_lock, service->lock_subclass);
+
+	/*
+	 * If we're on the client side, the service should already have been
+	 * disabled at this point.
+	 */
+	WARN_ON(service->id != 0 && !session->is_server &&
+			service->readiness != VS_SERVICE_DISABLED &&
+			service->readiness != VS_SERVICE_DELETED);
+
+	/*
+	 * Make sure the service is not active, and notify the remote end if
+	 * it needs to be reset. Note that we already hold the core service
+	 * state lock iff this is a non-core service.
+	 */
+	__reset_service(service, true);
+
+	/*
+	 * If the remote end is aware that the service is inactive, we can
+	 * delete right away; otherwise we need to wait for a notification
+	 * that the service has reset.
+	 */
+	switch (service->readiness) {
+	case VS_SERVICE_LOCAL_DELETE:
+	case VS_SERVICE_DELETED:
+		/* Nothing to do here */
+		mutex_unlock(&service->ready_lock);
+		vs_put_service(service);
+		return;
+	case VS_SERVICE_ACTIVE:
+		BUG();
+		break;
+	case VS_SERVICE_LOCAL_RESET:
+		service->readiness = VS_SERVICE_LOCAL_DELETE;
+		break;
+	case VS_SERVICE_INIT:
+		notify_on_destroy = false;
+		/* Fall through */
+	default:
+		service->readiness = VS_SERVICE_DELETED;
+		destroy_service(service, notify_on_destroy);
+		break;
+	}
+
+	mutex_unlock(&service->ready_lock);
+
+	/*
+	 * Remove the service symlink from the
+	 * sys/vservices/(<server>/<client>)-sessions/ directory
+	 */
+	vs_service_remove_sysfs_entries(session, service);
+
+	sysfs_remove_group(&service->dev.kobj, &service_stat_attributes);
+
+	/*
+	 * On the client-side we need to release the service id as soon as
+	 * the service is deleted. Otherwise the server may attempt to create
+	 * a new service with this id.
+	 */
+	if (!session->is_server)
+		vs_session_release_service_id(service);
+
+	device_del(&service->dev);
+	vs_put_service(service);
+}
+
+/**
+ * vs_service_delete - deactivate and start removing a service device
+ * @service: the service to delete
+ * @caller: the service initiating deletion
+ *
+ * Services may only be deleted by their owner (on the server side), or by the
+ * core service. This function must not be called for the core service.
+ */
+int vs_service_delete(struct vs_service_device *service,
+		struct vs_service_device *caller)
+{
+	struct vs_session_device *session =
+			vs_service_get_session(service);
+	struct vs_service_device *core_service = session->core_service;
+
+	if (WARN_ON(!core_service))
+		return -ENODEV;
+
+	if (!service->id)
+		return -EINVAL;
+
+	if (caller != service->owner && caller != core_service)
+		return -EPERM;
+
+	delete_service(service);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vs_service_delete);
+
+/**
+ * vs_service_handle_delete - deactivate and start removing a service device
+ * @service: the service to delete
+ *
+ * This is a variant of vs_service_delete which must only be called by the
+ * core service. It is used by the core service client when a service_removed
+ * message is received.
+ */
+int vs_service_handle_delete(struct vs_service_device *service)
+{
+	struct vs_session_device *session __maybe_unused =
+			vs_service_get_session(service);
+	struct vs_service_device *core_service __maybe_unused =
+			session->core_service;
+
+	lockdep_assert_held(&core_service->state_mutex);
+
+	delete_service(service);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vs_service_handle_delete);
+
+static void service_cleanup_work(struct work_struct *work)
+{
+	struct vs_service_device *service = container_of(work,
+			struct vs_service_device, cleanup_work);
+	struct vs_session_device *session = vs_service_get_session(service);
+
+	vs_dev_debug(VS_DEBUG_SESSION, session, &service->dev, "cleanup\n");
+
+	if (service->owner)
+		vs_put_service(service->owner);
+
+	/* Put our reference to the session */
+	if (service->dev.parent)
+		put_device(service->dev.parent);
+
+	tasklet_kill(&service->rx_tasklet);
+	cancel_work_sync(&service->rx_work);
+	cancel_delayed_work_sync(&service->cooloff_work);
+	cancel_delayed_work_sync(&service->ready_work);
+	cancel_work_sync(&service->reset_work);
+
+	if (service->work_queue)
+		destroy_workqueue(service->work_queue);
+
+	kfree(service->sysfs_name);
+	kfree(service->name);
+	kfree(service->protocol);
+	kfree(service);
+}
+
+static void vs_service_release(struct device *dev)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	vs_dev_debug(VS_DEBUG_SESSION, vs_service_get_session(service),
+			&service->dev, "release\n");
+
+	/*
+	 * We need to defer cleanup to avoid a circular dependency between the
+	 * core service's state lock (which can be held at this point, on the
+	 * client side) and any non-core service's reset work (which we must
+	 * cancel here, and which acquires the core service state lock).
+	 */
+	schedule_work(&service->cleanup_work);
+}
+
+static int service_add_idr(struct vs_session_device *session,
+		struct vs_service_device *service, vs_service_id_t service_id)
+{
+	int start, end, id;
+
+	if (service_id == VS_SERVICE_AUTO_ALLOCATE_ID) {
+		start = 1;
+		end = VS_MAX_SERVICES;
+	} else {
+		start = service_id;
+		end = service_id + 1;
+	}
+
+	mutex_lock(&session->service_idr_lock);
+	id = idr_alloc(&session->service_idr, service, start, end,
+			GFP_KERNEL);
+	mutex_unlock(&session->service_idr_lock);
+
+	if (id == -ENOSPC)
+		return -EBUSY;
+	else if (id < 0)
+		return id;
+
+	service->id = id;
+	return 0;
+}
+
+static int
+vs_service_create_sysfs_entries(struct vs_session_device *session,
+		struct vs_service_device *service, vs_service_id_t id)
+{
+	int ret;
+	char *sysfs_name, *c;
+
+	/* Add a symlink to session device inside service device sysfs */
+	ret = sysfs_create_link(&service->dev.kobj, &session->dev.kobj,
+			VS_SESSION_SYMLINK_NAME);
+	if (ret) {
+		dev_err(&service->dev, "Error %d creating session symlink\n",
+				ret);
+		goto fail;
+	}
+
+	/* Get the length of the string for sysfs dir */
+	sysfs_name = kasprintf(GFP_KERNEL, "%s:%d", service->name, id);
+	if (!sysfs_name) {
+		ret = -ENOMEM;
+		goto fail_session_link;
+	}
+
+	/*
+	 * We don't want to create symlinks containing '/', which would be
+	 * interpreted as a directory separator, so replace every '/' with '!'.
+	 */
+	while ((c = strchr(sysfs_name, '/')))
+		*c = '!';
+	ret = sysfs_create_link(session->sysfs_entry, &service->dev.kobj,
+			sysfs_name);
+	if (ret)
+		goto fail_free_sysfs_name;
+
+	service->sysfs_name = sysfs_name;
+
+	return 0;
+
+fail_free_sysfs_name:
+	kfree(sysfs_name);
+fail_session_link:
+	sysfs_remove_link(&service->dev.kobj, VS_SESSION_SYMLINK_NAME);
+fail:
+	return ret;
+}
+
+/**
+ * vs_service_register - create and register a new vs_service_device
+ * @session: the session device that is the parent of the service
+ * @owner: the service responsible for managing the new service
+ * @service_id: the ID of the new service
+ * @protocol: the protocol for the new service
+ * @name: the name of the new service
+ * @plat_data: value to be assigned to (struct device *)->platform_data
+ *
+ * This function should only be called by a session driver that is bound to
+ * the given session.
+ *
+ * The given service_id must not have been passed to a prior successful
+ * vs_service_register call, unless the service ID has since been freed by a
+ * call to the session driver's service_removed callback.
+ *
+ * The core service state lock must not be held while calling this function.
+ */
+struct vs_service_device *vs_service_register(struct vs_session_device *session,
+		struct vs_service_device *owner, vs_service_id_t service_id,
+		const char *protocol, const char *name, const void *plat_data)
+{
+	struct vs_service_device *service;
+	struct vs_session_driver *session_drv;
+	int ret = -EIO;
+	char *c;
+
+	if (service_id && !owner) {
+		dev_err(&session->dev, "Non-core service must have an owner\n");
+		ret = -EINVAL;
+		goto fail;
+	} else if (!service_id && owner) {
+		dev_err(&session->dev, "Core service must not have an owner\n");
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	if (!session->dev.driver)
+		goto fail;
+
+	session_drv = to_vs_session_driver(session->dev.driver);
+
+	service = kzalloc(sizeof(*service), GFP_KERNEL);
+	if (!service) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	INIT_LIST_HEAD(&service->rx_queue);
+	INIT_WORK(&service->rx_work, service_rx_work);
+	INIT_WORK(&service->reset_work, service_reset_work);
+	INIT_DELAYED_WORK(&service->ready_work, service_ready_work);
+	INIT_DELAYED_WORK(&service->cooloff_work, service_cooloff_work);
+	INIT_WORK(&service->cleanup_work, service_cleanup_work);
+	spin_lock_init(&service->rx_lock);
+	init_waitqueue_head(&service->quota_wq);
+
+	service->owner = vs_get_service(owner);
+
+	service->readiness = VS_SERVICE_INIT;
+	mutex_init(&service->ready_lock);
+	service->driver_probed = false;
+
+	/*
+	 * Service state locks - A service is only allowed to use one of these
+	 */
+	spin_lock_init(&service->state_spinlock);
+	mutex_init(&service->state_mutex);
+#ifdef CONFIG_VSERVICES_LOCK_DEBUG
+	service->state_spinlock_used = false;
+	service->state_mutex_used = false;
+#endif
+
+	/* Lock ordering
+	 *
+	 * The dependency order for the various service locks is as follows:
+	 *
+	 * cooloff_work
+	 * reset_work
+	 * ready_work
+	 * ready_lock/0
+	 * rx_work/0
+	 * state_mutex/0
+	 * ready_lock/1
+	 * ...
+	 * state_mutex/n
+	 * state_spinlock
+	 *
+	 * The subclass is the service's rank in the hierarchy of
+	 * service ownership. This results in core having subclass 0 on
+	 * server-side and 1 on client-side. Services directly created
+	 * by the core will have a lock subclass value of 2 for
+	 * servers, 3 for clients. Services created by non-core
+	 * services will have a lock subclass value of x + 2, where x
+	 * is the lock subclass of the creator service (e.g. servers
+	 * will have even numbered lock subclasses, clients will have
+	 * odd numbered lock subclasses).
+	 *
+	 * If a service driver has any additional locks for protecting
+	 * internal state, they will generally fit between state_mutex/n and
+	 * ready_lock/n+1 on this list. For the core service, this applies to
+	 * the session lock.
+	 */
+
+	if (owner)
+		service->lock_subclass = owner->lock_subclass + 2;
+	else
+		service->lock_subclass = session->is_server ? 0 : 1;
+
+#ifdef CONFIG_LOCKDEP
+	if (service->lock_subclass >= MAX_LOCKDEP_SUBCLASSES) {
+		dev_warn(&session->dev, "Owner hierarchy is too deep, lockdep will fail\n");
+	} else {
+		/*
+		 * We need to set the default subclass for the rx work,
+		 * because the workqueue API doesn't (and can't) provide
+		 * anything like lock_nested() for it.
+		 */
+
+		struct lock_class_key *key = service->rx_work.lockdep_map.key;
+
+		/*
+		 * We can't use the lockdep_set_class() macro because the
+		 * work's lockdep map is called .lockdep_map instead of
+		 * .dep_map.
+		 */
+		lockdep_init_map(&service->rx_work.lockdep_map,
+				"&service->rx_work", key,
+				service->lock_subclass);
+	}
+#endif
+
+	/*
+	 * Copy the protocol and name. Remove any leading or trailing
+	 * whitespace characters (including newlines) since the strings
+	 * may have been passed via sysfs files.
+	 */
+	if (protocol) {
+		service->protocol = kstrdup(protocol, GFP_KERNEL);
+		if (!service->protocol) {
+			ret = -ENOMEM;
+			goto fail_copy_protocol;
+		}
+		c = strim(service->protocol);
+		if (c != service->protocol)
+			memmove(service->protocol, c,
+					strlen(service->protocol) + 1);
+	}
+
+	service->name = kstrdup(name, GFP_KERNEL);
+	if (!service->name) {
+		ret = -ENOMEM;
+		goto fail_copy_name;
+	}
+	c = strim(service->name);
+	if (c != service->name)
+		memmove(service->name, c, strlen(service->name) + 1);
+
+	service->is_server = session_drv->is_server;
+
+	/* Grab a reference to the session we are on */
+	service->dev.parent = get_device(&session->dev);
+	service->dev.bus = session_drv->service_bus;
+	service->dev.release = vs_service_release;
+
+	service->last_reset = 0;
+	service->last_reset_request = 0;
+	service->last_ready = 0;
+	service->reset_delay = 0;
+
+	device_initialize(&service->dev);
+	service->dev.platform_data = (void *)plat_data;
+
+	ret = service_add_idr(session, service, service_id);
+	if (ret)
+		goto fail_add_idr;
+
+#ifdef CONFIG_VSERVICES_NAMED_DEVICE
+	/* Integrate session and service names in vservice devnodes */
+	dev_set_name(&service->dev, "vservice-%s:%s:%s:%d:%d",
+			session->is_server ? "server" : "client",
+			session->name, service->name,
+			session->session_num, service->id);
+#else
+	dev_set_name(&service->dev, "%s:%d", dev_name(&session->dev),
+			service->id);
+#endif
+
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+	if (service->id > 0)
+		service->dev.devt = MKDEV(vservices_cdev_major,
+			(session->session_num * VS_MAX_SERVICES) +
+			service->id);
+#endif
+
+	service->work_queue = vs_create_workqueue(dev_name(&service->dev));
+	if (!service->work_queue) {
+		ret = -ENOMEM;
+		goto fail_create_workqueue;
+	}
+
+	tasklet_init(&service->rx_tasklet, service_rx_tasklet,
+			(unsigned long)service);
+
+	/*
+	 * If this is the core service, set the core service pointer in the
+	 * session.
+	 */
+	if (service->id == 0) {
+		mutex_lock(&session->service_idr_lock);
+		if (session->core_service) {
+			ret = -EEXIST;
+			mutex_unlock(&session->service_idr_lock);
+			goto fail_become_core;
+		}
+
+		/* Put in vs_session_bus_remove() */
+		session->core_service = vs_get_service(service);
+		mutex_unlock(&session->service_idr_lock);
+	}
+
+	/* Notify the transport */
+	ret = session->transport->vt->service_add(session->transport, service);
+	if (ret) {
+		dev_err(&session->dev,
+				"Failed to add service %d (%s:%s) to transport: %d\n",
+				service->id, service->name,
+				service->protocol, ret);
+		goto fail_transport_add;
+	}
+
+	/* Delay uevent until vs_service_start(). */
+	dev_set_uevent_suppress(&service->dev, true);
+
+	ret = device_add(&service->dev);
+	if (ret)
+		goto fail_device_add;
+
+	/* Create the service statistics sysfs group */
+	ret = sysfs_create_group(&service->dev.kobj, &service_stat_attributes);
+	if (ret)
+		goto fail_sysfs_create_group;
+
+	/* Create additional sysfs files */
+	ret = vs_service_create_sysfs_entries(session, service, service->id);
+	if (ret)
+		goto fail_sysfs_add_entries;
+
+	return service;
+
+fail_sysfs_add_entries:
+	sysfs_remove_group(&service->dev.kobj, &service_stat_attributes);
+fail_sysfs_create_group:
+	device_del(&service->dev);
+fail_device_add:
+	session->transport->vt->service_remove(session->transport, service);
+fail_transport_add:
+	if (service->id == 0) {
+		session->core_service = NULL;
+		vs_put_service(service);
+	}
+fail_become_core:
+fail_create_workqueue:
+	vs_session_release_service_id(service);
+fail_add_idr:
+	/*
+	 * device_initialize() has been called, so we must call put_device()
+	 * and let vs_service_release() handle the rest of the cleanup.
+	 */
+	put_device(&service->dev);
+	return ERR_PTR(ret);
+
+fail_copy_name:
+	kfree(service->protocol);
+fail_copy_protocol:
+	kfree(service);
+fail:
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(vs_service_register);
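+
+/*
+ * Minimal registration sketch, assuming a hypothetical server-side session
+ * driver creating a service on behalf of the core service; the protocol and
+ * name strings are illustrative only:
+ *
+ *	struct vs_service_device *svc;
+ *
+ *	svc = vs_service_register(session, core_service,
+ *			VS_SERVICE_AUTO_ALLOCATE_ID, "com.example.serial",
+ *			"serial0", NULL);
+ *	if (IS_ERR(svc))
+ *		return PTR_ERR(svc);
+ *
+ * As noted above, the caller must be the session driver bound to the given
+ * session, and must not hold the core service state lock.
+ */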
+
+/**
+ * vs_session_get_service - Look up a service by ID on a session and get
+ * a reference to it. The caller must call vs_put_service when it is finished
+ * with the service.
+ *
+ * @session: The session to search for the service on
+ * @service_id: ID of the service to find
+ */
+struct vs_service_device *
+vs_session_get_service(struct vs_session_device *session,
+		vs_service_id_t service_id)
+{
+	struct vs_service_device *service;
+
+	if (!session)
+		return NULL;
+
+	rcu_read_lock();
+	service = idr_find(&session->service_idr, service_id);
+	if (!service) {
+		rcu_read_unlock();
+		return NULL;
+	}
+	vs_get_service(service);
+	rcu_read_unlock();
+
+	return service;
+}
+EXPORT_SYMBOL_GPL(vs_session_get_service);
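+
+/*
+ * Typical lookup pattern for vs_session_get_service(); the reference taken
+ * here must be dropped with vs_put_service() once the caller is done:
+ *
+ *	service = vs_session_get_service(session, service_id);
+ *	if (!service)
+ *		return -ENODEV;
+ *	... use the service ...
+ *	vs_put_service(service);
+ */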
+
+/**
+ * __for_each_service - Iterate over all non-core services on a session.
+ *
+ * @session: Session to iterate services on
+ * @func: Callback function for each iterated service
+ *
+ * Iterate over all services on a session, excluding the core service, and
+ * call a callback function on each.
+ */
+static void __for_each_service(struct vs_session_device *session,
+		void (*func)(struct vs_service_device *))
+{
+	struct vs_service_device *service;
+	int id;
+
+	for (id = 1; ; id++) {
+		rcu_read_lock();
+		service = idr_get_next(&session->service_idr, &id);
+		if (!service) {
+			rcu_read_unlock();
+			break;
+		}
+		vs_get_service(service);
+		rcu_read_unlock();
+
+		func(service);
+		vs_put_service(service);
+	}
+}
+
+/**
+ * vs_session_delete_noncore - immediately delete all non-core services
+ * @session: the session whose services are to be deleted
+ *
+ * This function disables and deletes all non-core services without notifying
+ * the core service. It must only be called by the core service, with its state
+ * lock held. It is used when the core service client disconnects or
+ * resets, and when the core service server has its driver removed.
+ */
+void vs_session_delete_noncore(struct vs_session_device *session)
+{
+	struct vs_service_device *core_service __maybe_unused =
+			session->core_service;
+
+	lockdep_assert_held(&core_service->state_mutex);
+
+	vs_session_disable_noncore(session);
+
+	__for_each_service(session, delete_service);
+}
+EXPORT_SYMBOL_GPL(vs_session_delete_noncore);
+
+/**
+ * vs_session_for_each_service - Iterate over all initialised and non-deleted
+ * non-core services on a session.
+ *
+ * @session: Session to iterate services on
+ * @func: Callback function for each iterated service
+ * @data: Extra data to pass to the callback
+ *
+ * Iterate over all services on a session, excluding the core service and any
+ * service that has been deleted or has not yet had vs_service_start() called,
+ * and call a callback function on each. The callback function is called with
+ * the service's ready lock held.
+ */
+void vs_session_for_each_service(struct vs_session_device *session,
+		void (*func)(struct vs_service_device *, void *), void *data)
+{
+	struct vs_service_device *service;
+	int id;
+
+	for (id = 1; ; id++) {
+		rcu_read_lock();
+		service = idr_get_next(&session->service_idr, &id);
+		if (!service) {
+			rcu_read_unlock();
+			break;
+		}
+		vs_get_service(service);
+		rcu_read_unlock();
+
+		mutex_lock_nested(&service->ready_lock, service->lock_subclass);
+
+		if (service->readiness != VS_SERVICE_LOCAL_DELETE &&
+				service->readiness != VS_SERVICE_DELETED &&
+				service->readiness != VS_SERVICE_INIT)
+			func(service, data);
+
+		mutex_unlock(&service->ready_lock);
+		vs_put_service(service);
+	}
+}
+
+static void force_disable_service(struct vs_service_device *service,
+		void *unused)
+{
+	lockdep_assert_held(&service->ready_lock);
+
+	if (service->readiness == VS_SERVICE_ACTIVE)
+		__reset_service(service, false);
+
+	disable_service(service, true);
+}
+
+/**
+ * vs_session_disable_noncore - immediately disable all non-core services
+ * @session: the session whose services are to be disabled
+ *
+ * This function must be called by the core service driver to disable all
+ * services, whenever it resets or is otherwise disconnected. It is called
+ * directly by the server-side core service, and by the client-side core
+ * service via vs_session_delete_noncore().
+ */
+void vs_session_disable_noncore(struct vs_session_device *session)
+{
+	vs_session_for_each_service(session, force_disable_service, NULL);
+}
+EXPORT_SYMBOL_GPL(vs_session_disable_noncore);
+
+static void try_enable_service(struct vs_service_device *service, void *unused)
+{
+	lockdep_assert_held(&service->ready_lock);
+
+	__enable_service(service);
+}
+
+/**
+ * vs_session_enable_noncore - enable all disabled non-core services
+ * @session: the session whose services are to be enabled
+ *
+ * This function is called by the core server driver to enable all services
+ * when the core client connects.
+ */
+void vs_session_enable_noncore(struct vs_session_device *session)
+{
+	vs_session_for_each_service(session, try_enable_service, NULL);
+}
+EXPORT_SYMBOL_GPL(vs_session_enable_noncore);
+
+/**
+ * vs_session_handle_message - process an incoming message from a transport
+ * @session: the session that is receiving the message
+ * @mbuf: a buffer containing the message payload
+ * @service_id: the id of the service that the message was addressed to
+ *
+ * This routine will return 0 if the buffer was accepted, or a negative value
+ * otherwise. In the latter case the caller should free the buffer. If the
+ * error is fatal, this routine will reset the service.
+ *
+ * This routine may be called from interrupt context.
+ *
+ * The caller must always serialise calls to this function relative to
+ * vs_session_handle_reset and vs_session_handle_activate. We don't do this
+ * internally, to avoid having to disable interrupts when called from task
+ * context.
+ */
+int vs_session_handle_message(struct vs_session_device *session,
+		struct vs_mbuf *mbuf, vs_service_id_t service_id)
+{
+	struct vs_service_device *service;
+	struct vs_transport *transport;
+	unsigned long flags;
+
+	transport = session->transport;
+
+	service = vs_session_get_service(session, service_id);
+	if (!service) {
+		dev_err(&session->dev, "message for unknown service %d\n",
+				service_id);
+		session_fatal_error(session, GFP_ATOMIC);
+		return -ENOTCONN;
+	}
+
+	/*
+	 * Take the rx lock before checking service readiness. This guarantees
+	 * that if __reset_service() has just made the service inactive, we
+	 * either see it and don't enqueue the message, or else enqueue the
+	 * message before cancel_pending_rx() runs (and removes it).
+	 */
+	spin_lock_irqsave(&service->rx_lock, flags);
+
+	/* If the service is not active, drop the message. */
+	if (service->readiness != VS_SERVICE_ACTIVE) {
+		spin_unlock_irqrestore(&service->rx_lock, flags);
+		vs_put_service(service);
+		return -ECONNRESET;
+	}
+
+	list_add_tail(&mbuf->queue, &service->rx_queue);
+	spin_unlock_irqrestore(&service->rx_lock, flags);
+
+	/* Schedule processing of the message by the service's drivers. */
+	queue_rx_work(service);
+	vs_put_service(service);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vs_session_handle_message);
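+
+/*
+ * Minimal sketch of a transport rx path calling the helper above, assuming a
+ * hypothetical transport that has already parsed the service id out of an
+ * incoming buffer (foo_free_mbuf is an illustrative name only). On error the
+ * transport remains responsible for freeing the buffer:
+ *
+ *	err = vs_session_handle_message(session, mbuf, service_id);
+ *	if (err < 0)
+ *		foo_free_mbuf(transport, mbuf);
+ */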
+
+/**
+ * vs_session_quota_available - notify a service that it can transmit
+ * @session: the session owning the service that is ready
+ * @service_id: the id of the service that is ready
+ * @count: the number of buffers that just became ready
+ * @send_tx_ready: true if quota has just become nonzero due to a buffer being
+ *                 freed by the remote communication partner
+ *
+ * This routine is called by the transport driver when a send-direction
+ * message buffer becomes free. It wakes up any task that is waiting for
+ * send quota to become available.
+ *
+ * This routine may be called from interrupt context from the transport
+ * driver, and as such, it may not sleep.
+ *
+ * The caller must always serialise calls to this function relative to
+ * vs_session_handle_reset and vs_session_handle_activate. We don't do this
+ * internally, to avoid having to disable interrupts when called from task
+ * context.
+ *
+ * If the send_tx_ready argument is true, this function also schedules a
+ * call to the driver's tx_ready callback. Note that this never has priority
+ * over handling incoming messages; it will only be handled once the receive
+ * queue is empty. This is to increase batching of outgoing messages, and also
+ * to reduce the chance that an outgoing message will be dropped by the partner
+ * because an incoming message has already changed the state.
+ *
+ * In general, task context drivers should use the waitqueue, and softirq
+ * context drivers (with tx_atomic set) should use tx_ready.
+ */
+void vs_session_quota_available(struct vs_session_device *session,
+		vs_service_id_t service_id, unsigned count,
+		bool send_tx_ready)
+{
+	struct vs_service_device *service;
+	unsigned long flags;
+
+	service = vs_session_get_service(session, service_id);
+	if (!service) {
+		dev_err(&session->dev, "tx ready for unknown service %d\n",
+				service_id);
+		session_fatal_error(session, GFP_ATOMIC);
+		return;
+	}
+
+	wake_up_nr(&service->quota_wq, count);
+
+	if (send_tx_ready) {
+		/*
+		 * Take the rx lock before checking service readiness. This
+		 * guarantees that if __reset_service() has just made the
+		 * service inactive, we either see it and don't set the tx_ready
+		 * flag, or else set the flag before cancel_pending_rx() runs
+		 * (and clears it).
+		 */
+		spin_lock_irqsave(&service->rx_lock, flags);
+
+		/* If the service is not active, drop the tx_ready event */
+		if (service->readiness != VS_SERVICE_ACTIVE) {
+			spin_unlock_irqrestore(&service->rx_lock, flags);
+			vs_put_service(service);
+			return;
+		}
+
+		service->tx_ready = true;
+		spin_unlock_irqrestore(&service->rx_lock, flags);
+
+		/* Schedule RX processing by the service driver. */
+		queue_rx_work(service);
+	}
+
+	vs_put_service(service);
+}
+EXPORT_SYMBOL_GPL(vs_session_quota_available);
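+
+/*
+ * Minimal sketch, assuming a hypothetical transport that frees one send
+ * buffer at a time from its interrupt handler:
+ *
+ *	vs_session_quota_available(session, service_id, 1, quota_was_zero);
+ *
+ * where quota_was_zero is an illustrative flag that is true only if freeing
+ * this buffer took the service's send quota from zero to nonzero, matching
+ * the send_tx_ready semantics described above.
+ */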
+
+/**
+ * vs_session_handle_notify - process an incoming notification from a transport
+ * @session: the session that is receiving the notification
+ * @bits: the notification bits that were set for the service
+ * @service_id: the id of the service that the notification was addressed to
+ *
+ * This function may be called from interrupt context from the transport driver,
+ * and as such, it may not sleep.
+ */
+void vs_session_handle_notify(struct vs_session_device *session,
+		unsigned long bits, vs_service_id_t service_id)
+{
+	struct vs_service_device *service;
+	struct vs_service_driver *driver;
+	unsigned long flags;
+
+	service = vs_session_get_service(session, service_id);
+	if (!service) {
+		/* Ignore the notification since the service id doesn't exist */
+		dev_err(&session->dev, "notification for unknown service %d\n",
+				service_id);
+		return;
+	}
+
+	/*
+	 * Take the rx lock before checking service readiness. This guarantees
+	 * that if __reset_service() has just made the service inactive, we
+	 * either see it and don't send the notification, or else send it
+	 * before cancel_pending_rx() runs (and thus before the driver is
+	 * deactivated).
+	 */
+	spin_lock_irqsave(&service->rx_lock, flags);
+
+	/* If the service is not active, drop the notification. */
+	if (service->readiness != VS_SERVICE_ACTIVE) {
+		spin_unlock_irqrestore(&service->rx_lock, flags);
+		vs_put_service(service);
+		return;
+	}
+
+	/* There should be a driver bound on the service */
+	if (WARN_ON(!service->dev.driver)) {
+		spin_unlock_irqrestore(&service->rx_lock, flags);
+		vs_put_service(service);
+		return;
+	}
+
+	driver = to_vs_service_driver(service->dev.driver);
+	/* Call the driver's notify function */
+	driver->notify(service, bits);
+
+	spin_unlock_irqrestore(&service->rx_lock, flags);
+	vs_put_service(service);
+}
+EXPORT_SYMBOL_GPL(vs_session_handle_notify);
+
+static unsigned long reset_cool_off(struct vs_service_device *service)
+{
+	return service->reset_delay * RESET_THROTTLE_COOL_OFF_MULT;
+}
+
+static bool ready_needs_delay(struct vs_service_device *service)
+{
+	/*
+	 * We throttle resets if too little time elapsed between the service
+	 * last becoming ready, and the service last starting a reset.
+	 *
+	 * We do not use the current time here because it includes the time
+	 * taken by the local service driver to actually process the reset.
+	 */
+	return service->last_reset && service->last_ready && time_before(
+			service->last_reset,
+			service->last_ready + RESET_THROTTLE_TIME);
+}
+
+static bool reset_throttle_cooled_off(struct vs_service_device *service)
+{
+	/*
+	 * Reset throttling cools off if enough time has elapsed since the
+	 * last reset request.
+	 *
+	 * We check against the last requested reset, not the last serviced
+	 * reset or ready. If we are throttling, a reset may not have been
+	 * serviced for some time even though we are still receiving requests.
+	 */
+	return service->reset_delay && service->last_reset_request &&
+			time_after(jiffies, service->last_reset_request +
+					reset_cool_off(service));
+}
+
+/*
+ * Queue up the ready work for a service. If a service is resetting too fast
+ * then it will be throttled using an exponentially increasing delay before
+ * marking it ready. If the reset speed backs off then the ready throttling
+ * will be cleared. If a service reaches the maximum throttling delay then all
+ * resets will be ignored until the cool off period has elapsed.
+ *
+ * The basic logic of the reset throttling is:
+ *
+ *  - If a reset request is processed and the last ready was less than
+ *    RESET_THROTTLE_TIME ago, then the ready needs to be delayed to
+ *    throttle resets.
+ *
+ *  - The ready delay increases exponentially on each throttled reset
+ *    between RESET_THROTTLE_MIN and RESET_THROTTLE_MAX.
+ *
+ *  - If RESET_THROTTLE_MAX is reached then no ready will be sent until the
+ *    reset requests have cooled off.
+ *
+ *  - Reset requests have cooled off when no reset requests have been
+ *    received for RESET_THROTTLE_COOL_OFF_MULT * the service's current
+ *    ready delay, at which point the service's reset throttling is disabled.
+ *
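+ *  For illustration only (the actual RESET_THROTTLE_* values are defined
+ *  elsewhere): if RESET_THROTTLE_MIN were 1 second, a service that keeps
+ *  resetting immediately after becoming ready would have its readies delayed
+ *  by 1, 2, 4, ... seconds until RESET_THROTTLE_MAX was reached, after which
+ *  no ready would be sent until the reset requests stopped for
+ *  RESET_THROTTLE_COOL_OFF_MULT times the current delay.
+ *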
+ * Note: Be careful when adding print statements, including debugging, to
+ * this function. The ready throttling is intended to prevent DoSing of the
+ * vServices framework by repeated resets (e.g. because of a persistent
+ * failure). Adding a printk on each reset, for example, would result in
+ * syslog spamming, which is a DoS attack in itself.
+ *
+ * The ready lock must be held by the caller.
+ */
+static void queue_ready_work(struct vs_service_device *service)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+	unsigned long delay;
+	bool wait_for_cooloff = false;
+
+	lockdep_assert_held(&service->ready_lock);
+
+	/* This should only be called when the service enters reset. */
+	WARN_ON(service->readiness != VS_SERVICE_RESET);
+
+	if (ready_needs_delay(service)) {
+		/* Reset delay increments exponentially */
+		if (!service->reset_delay) {
+			service->reset_delay = RESET_THROTTLE_MIN;
+		} else if (service->reset_delay < RESET_THROTTLE_MAX) {
+			service->reset_delay *= 2;
+		} else {
+			wait_for_cooloff = true;
+		}
+
+		delay = service->reset_delay;
+	} else {
+		/* The reset request appears to have been sane. */
+		delay = 0;
+	}
+
+	if (service->reset_delay > 0) {
+		/*
+		 * Schedule cooloff work, to set the reset_delay to 0 if
+		 * the reset requests stop for long enough.
+		 */
+		schedule_delayed_work(&service->cooloff_work,
+				reset_cool_off(service));
+	}
+
+	if (wait_for_cooloff) {
+		/*
+		 * We need to finish cooling off before we service resets
+		 * again. Schedule cooloff_work to run after the current
+		 * cooloff period ends; it may reschedule itself even later
+		 * if any more requests arrive.
+		 */
+		dev_err(&session->dev,
+				"Service %s is resetting too fast - must cool off for %u ms\n",
+				dev_name(&service->dev),
+				jiffies_to_msecs(reset_cool_off(service)));
+		return;
+	}
+
+	if (delay)
+		dev_err(&session->dev,
+				"Service %s is resetting too fast - delaying ready by %u ms\n",
+				dev_name(&service->dev),
+				jiffies_to_msecs(delay));
+
+	vs_debug(VS_DEBUG_SESSION, session,
+			"Service %s will become ready in %u ms\n",
+			dev_name(&service->dev),
+			jiffies_to_msecs(delay));
+
+	if (service->last_ready)
+		vs_debug(VS_DEBUG_SESSION, session,
+				"Last became ready %u ms ago\n",
+				msecs_ago(service->last_ready));
+	if (service->reset_delay >= RESET_THROTTLE_MAX)
+		dev_err(&session->dev, "Service %s hit max reset throttle\n",
+				dev_name(&service->dev));
+
+	schedule_delayed_work(&service->ready_work, delay);
+}
+
+static void session_activation_work(struct work_struct *work)
+{
+	struct vs_session_device *session = container_of(work,
+			struct vs_session_device, activation_work);
+	struct vs_service_device *core_service = session->core_service;
+	struct vs_session_driver *session_drv =
+			to_vs_session_driver(session->dev.driver);
+	int activation_state;
+	int ret;
+
+	if (WARN_ON(!core_service))
+		return;
+
+	if (WARN_ON(!session_drv))
+		return;
+
+	/*
+	 * We use an atomic to prevent duplicate activations if we race with
+	 * an activate after a reset. This is very unlikely, but possible if
+	 * this work item is preempted.
+	 */
+	activation_state = atomic_cmpxchg(&session->activation_state,
+			VS_SESSION_ACTIVATE, VS_SESSION_ACTIVE);
+
+	switch (activation_state) {
+	case VS_SESSION_ACTIVATE:
+		vs_debug(VS_DEBUG_SESSION, session,
+				"core service will be activated\n");
+		vs_service_enable(core_service);
+		break;
+
+	case VS_SESSION_RESET:
+		vs_debug(VS_DEBUG_SESSION, session,
+				"core service will be deactivated\n");
+
+		/* Handle the core service reset */
+		ret = service_handle_reset(session, core_service, true);
+
+		/* Tell the transport if the reset succeeded */
+		if (ret >= 0)
+			session->transport->vt->ready(session->transport);
+		else
+			dev_err(&session->dev, "core service reset unhandled: %d\n",
+					ret);
+
+		break;
+
+	default:
+		vs_debug(VS_DEBUG_SESSION, session,
+				"core service already active\n");
+		break;
+	}
+}
+
+/**
+ * vs_session_handle_reset - Handle a reset at the session layer.
+ * @session: Session to reset
+ *
+ * This function is called by the transport when it receives a transport-level
+ * reset notification.
+ *
+ * After a session is reset by calling this function, it will reset all of its
+ * attached services, and then call the transport's ready callback. The
+ * services will remain in reset until the session is re-activated by a call
+ * to vs_session_handle_activate().
+ *
+ * Calling this function on a session that is already reset is permitted, as
+ * long as the transport accepts the consequent duplicate ready callbacks.
+ *
+ * A newly created session is initially in the reset state, and will not call
+ * the transport's ready callback. The transport may choose to either act as
+ * if the ready callback had been called, or call this function again to
+ * trigger a new ready callback.
+ */
+void vs_session_handle_reset(struct vs_session_device *session)
+{
+	atomic_set(&session->activation_state, VS_SESSION_RESET);
+
+	schedule_work(&session->activation_work);
+}
+EXPORT_SYMBOL_GPL(vs_session_handle_reset);
+
+/**
+ * vs_session_handle_activate - Allow a session to leave the reset state.
+ * @session: Session to mark active.
+ *
+ * This function is called by the transport when a transport-level reset is
+ * completed; that is, after the session layer has reset its services and
+ * called the ready callback, at *both* ends of the connection.
+ */
+void vs_session_handle_activate(struct vs_session_device *session)
+{
+	atomic_set(&session->activation_state, VS_SESSION_ACTIVATE);
+
+	schedule_work(&session->activation_work);
+}
+EXPORT_SYMBOL_GPL(vs_session_handle_activate);
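+
+/*
+ * Putting the two helpers above together, a transport's reset handling
+ * typically looks like the following sketch (the foo_* names are
+ * illustrative only):
+ *
+ *	foo_rx_transport_reset(transport)
+ *		-> vs_session_handle_reset(session);
+ *		   the session resets its services and then calls the
+ *		   transport's ready callback
+ *
+ *	foo_rx_transport_ready(transport)	(both ends are now ready)
+ *		-> vs_session_handle_activate(session);
+ *		   the core service is re-enabled and the session becomes
+ *		   active
+ */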
+
+static ssize_t id_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct vs_session_device *session = to_vs_session_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", session->session_num);
+}
+
+static DEVICE_ATTR_RO(id);
+
+/*
+ * The vServices session device type
+ */
+static ssize_t is_server_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct vs_session_device *session = to_vs_session_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", session->is_server);
+}
+
+static DEVICE_ATTR_RO(is_server);
+
+static ssize_t name_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct vs_session_device *session = to_vs_session_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n", session->name);
+}
+
+static DEVICE_ATTR_RO(name);
+
+#ifdef CONFIG_VSERVICES_DEBUG
+static ssize_t debug_mask_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct vs_session_device *session = to_vs_session_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%.8lx\n", session->debug_mask);
+}
+
+static ssize_t debug_mask_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct vs_session_device *session = to_vs_session_device(dev);
+	int err;
+
+	err = kstrtoul(buf, 0, &session->debug_mask);
+	if (err)
+		return err;
+
+	/* Clear any bits we don't know about */
+	session->debug_mask &= VS_DEBUG_ALL;
+
+	return count;
+}
+
+static DEVICE_ATTR_RW(debug_mask);
+
+#endif /* CONFIG_VSERVICES_DEBUG */
+
+static struct attribute *vservices_session_dev_attrs[] = {
+	&dev_attr_id.attr,
+	&dev_attr_is_server.attr,
+	&dev_attr_name.attr,
+#ifdef CONFIG_VSERVICES_DEBUG
+	&dev_attr_debug_mask.attr,
+#endif
+	NULL,
+};
+ATTRIBUTE_GROUPS(vservices_session_dev);
+
+static int vs_session_free_idr(struct vs_session_device *session)
+{
+	mutex_lock(&vs_session_lock);
+	idr_remove(&session_idr, session->session_num);
+	mutex_unlock(&vs_session_lock);
+	return 0;
+}
+
+static void vs_session_device_release(struct device *dev)
+{
+	struct vs_session_device *session = to_vs_session_device(dev);
+
+	vs_session_free_idr(session);
+
+	kfree(session->name);
+	kfree(session);
+}
+
+/*
+ * The vServices session bus
+ */
+static int vs_session_bus_match(struct device *dev,
+		struct device_driver *driver)
+{
+	struct vs_session_device *session = to_vs_session_device(dev);
+	struct vs_session_driver *session_drv = to_vs_session_driver(driver);
+
+	return (session->is_server == session_drv->is_server);
+}
+
+static int vs_session_bus_remove(struct device *dev)
+{
+	struct vs_session_device *session = to_vs_session_device(dev);
+	struct vs_service_device *core_service = session->core_service;
+
+	if (!core_service)
+		return 0;
+
+	/*
+	 * Abort any pending session activation. We rely on the transport to
+	 * not call vs_session_handle_activate after this point.
+	 */
+	cancel_work_sync(&session->activation_work);
+
+	/* Abort any pending fatal error handling, which is redundant now. */
+	cancel_work_sync(&session->fatal_error_work);
+
+	/*
+	 * Delete the core service. This will implicitly delete everything
+	 * else (in reset on the client side, and in release on the server
+	 * side). The session holds a reference, so this won't release the
+	 * service struct.
+	 */
+	delete_service(core_service);
+
+	/* Now clean up the core service. */
+	session->core_service = NULL;
+
+	/* Matches the get in vs_service_register() */
+	vs_put_service(core_service);
+
+	return 0;
+}
+
+static int vservices_session_uevent(struct device *dev,
+		struct kobj_uevent_env *env)
+{
+	struct vs_session_device *session = to_vs_session_device(dev);
+
+	dev_dbg(dev, "uevent\n");
+
+	if (add_uevent_var(env, "IS_SERVER=%d", session->is_server))
+		return -ENOMEM;
+
+	if (add_uevent_var(env, "SESSION_ID=%d", session->session_num))
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void vservices_session_shutdown(struct device *dev)
+{
+	struct vs_session_device *session = to_vs_session_device(dev);
+
+	dev_dbg(dev, "shutdown\n");
+
+	/* Do a transport reset */
+	session->transport->vt->reset(session->transport);
+}
+
+struct bus_type vs_session_bus_type = {
+	.name		= "vservices-session",
+	.match		= vs_session_bus_match,
+	.remove		= vs_session_bus_remove,
+	.dev_groups	= vservices_session_dev_groups,
+	.uevent		= vservices_session_uevent,
+	.shutdown	= vservices_session_shutdown,
+};
+EXPORT_SYMBOL_GPL(vs_session_bus_type);
+
+/*
+ * Common code for the vServices client and server buses
+ */
+int vs_service_bus_probe(struct device *dev)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+	struct vs_service_driver *vsdrv = to_vs_service_driver(dev->driver);
+	struct vs_session_device *session = vs_service_get_session(service);
+	int ret;
+
+	vs_dev_debug(VS_DEBUG_SESSION, session, &service->dev, "probe\n");
+
+	/*
+	 * Increase the reference count on the service driver. We don't allow
+	 * service driver modules to be removed if there are any device
+	 * instances present. The devices must be explicitly removed first.
+	 */
+	if (!try_module_get(vsdrv->driver.owner))
+		return -ENODEV;
+
+	ret = vsdrv->probe(service);
+	if (ret) {
+		module_put(vsdrv->driver.owner);
+		return ret;
+	}
+
+	service->driver_probed = true;
+
+	try_start_service(service);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vs_service_bus_probe);
+
+int vs_service_bus_remove(struct device *dev)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+	struct vs_service_driver *vsdrv = to_vs_service_driver(dev->driver);
+
+	reset_service(service);
+
+	/* Prevent reactivation of the driver */
+	service->driver_probed = false;
+
+	/* The driver has now had its reset() callback called; remove it */
+	vsdrv->remove(service);
+
+	/*
+	 * Take the service's state mutex and spinlock. This ensures that any
+	 * thread that is calling vs_state_lock_safe[_bh] will either complete
+	 * now, or see the driver removal and fail, irrespective of which type
+	 * of lock it is using.
+	 */
+	mutex_lock_nested(&service->state_mutex, service->lock_subclass);
+	spin_lock_bh(&service->state_spinlock);
+
+	/* Release all the locks. */
+	spin_unlock_bh(&service->state_spinlock);
+	mutex_unlock(&service->state_mutex);
+	mutex_unlock(&service->ready_lock);
+
+#ifdef CONFIG_VSERVICES_LOCK_DEBUG
+	service->state_spinlock_used = false;
+	service->state_mutex_used = false;
+#endif
+
+	module_put(vsdrv->driver.owner);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vs_service_bus_remove);
+
+int vs_service_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+	struct vs_session_device *session = vs_service_get_session(service);
+
+	dev_dbg(dev, "uevent\n");
+
+	if (add_uevent_var(env, "IS_SERVER=%d", service->is_server))
+		return -ENOMEM;
+
+	if (add_uevent_var(env, "SERVICE_ID=%d", service->id))
+		return -ENOMEM;
+
+	if (add_uevent_var(env, "SESSION_ID=%d", session->session_num))
+		return -ENOMEM;
+
+	if (add_uevent_var(env, "SERVICE_NAME=%s", service->name))
+		return -ENOMEM;
+
+	if (add_uevent_var(env, "PROTOCOL=%s", service->protocol ?: ""))
+		return -ENOMEM;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vs_service_bus_uevent);
+
+static int vs_session_create_sysfs_entry(struct vs_transport *transport,
+		struct vs_session_device *session, bool server,
+		const char *transport_name)
+{
+	char *sysfs_name;
+	struct kobject *sysfs_parent = vservices_client_root;
+
+	if (!transport_name)
+		return -EINVAL;
+
+	sysfs_name = kasprintf(GFP_KERNEL, "%s:%s", transport->type,
+			transport_name);
+	if (!sysfs_name)
+		return -ENOMEM;
+
+	if (server)
+		sysfs_parent = vservices_server_root;
+
+	session->sysfs_entry = kobject_create_and_add(sysfs_name, sysfs_parent);
+
+	kfree(sysfs_name);
+	if (!session->sysfs_entry)
+		return -ENOMEM;
+	return 0;
+}
+
+static int vs_session_alloc_idr(struct vs_session_device *session)
+{
+	int id;
+
+	mutex_lock(&vs_session_lock);
+	id = idr_alloc(&session_idr, session, 0, VS_MAX_SESSIONS, GFP_KERNEL);
+	mutex_unlock(&vs_session_lock);
+
+	if (id == -ENOSPC)
+		return -EBUSY;
+	else if (id < 0)
+		return id;
+
+	session->session_num = id;
+	return 0;
+}
+
+/**
+ * vs_session_register - register a vservices session on a transport
+ * @transport: vservices transport that the session will attach to
+ * @parent: device that implements the transport (for sysfs)
+ * @server: true if the session is server-side
+ * @transport_name: name of the transport
+ *
+ * This function is intended to be called from the probe() function of a
+ * transport driver. It sets up a new session device, which then either
+ * performs automatic service discovery (for clients) or creates sysfs nodes
+ * that allow the user to create services (for servers).
+ *
+ * Note that the parent is only used by the driver framework; it is not
+ * directly accessed by the session drivers. Thus, a single transport device
+ * can support multiple sessions, as long as they each have a unique struct
+ * vs_transport.
+ *
+ * Note: This function may sleep, and therefore must not be called from
+ * interrupt context.
+ *
+ * Returns a pointer to the new device, or an error pointer.
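+ *
+ * Example (illustrative names) from a transport driver's probe():
+ *
+ *	session = vs_session_register(&priv->transport, &pdev->dev,
+ *			is_server, dev_name(&pdev->dev));
+ *	if (IS_ERR(session))
+ *		return PTR_ERR(session);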
+ */
+struct vs_session_device *vs_session_register(struct vs_transport *transport,
+		struct device *parent, bool server, const char *transport_name)
+{
+	struct device *dev;
+	struct vs_session_device *session;
+	int ret = -ENOMEM;
+
+	WARN_ON(!transport);
+
+	session = kzalloc(sizeof(*session), GFP_KERNEL);
+	if (!session)
+		goto fail_session_alloc;
+
+	session->transport = transport;
+	session->is_server = server;
+	session->name = kstrdup(transport_name, GFP_KERNEL);
+	if (!session->name)
+		goto fail_free_session;
+
+	INIT_WORK(&session->activation_work, session_activation_work);
+	INIT_WORK(&session->fatal_error_work, session_fatal_error_work);
+
+#ifdef CONFIG_VSERVICES_DEBUG
+	session->debug_mask = default_debug_mask & VS_DEBUG_ALL;
+#endif
+
+	idr_init(&session->service_idr);
+	mutex_init(&session->service_idr_lock);
+
+	/*
+	 * We must create the session's sysfs entry before registering the
+	 * device, so that the entry is available while the core service is
+	 * being registered.
+	 */
+	ret = vs_session_create_sysfs_entry(transport, session, server,
+			transport_name);
+	if (ret)
+		goto fail_free_session;
+
+	ret = vs_session_alloc_idr(session);
+	if (ret)
+		goto fail_sysfs_entry;
+
+	dev = &session->dev;
+	dev->parent = parent;
+	dev->bus = &vs_session_bus_type;
+	dev->release = vs_session_device_release;
+	dev_set_name(dev, "vservice:%d", session->session_num);
+
+	ret = device_register(dev);
+	if (ret) {
+		goto fail_session_map;
+	}
+
+	/* Add a symlink to transport device inside session device sysfs dir */
+	if (parent) {
+		ret = sysfs_create_link(&session->dev.kobj,
+				&parent->kobj, VS_TRANSPORT_SYMLINK_NAME);
+		if (ret) {
+			dev_err(&session->dev,
+					"Error %d creating transport symlink\n",
+					ret);
+			goto fail_session_device_unregister;
+		}
+	}
+
+	return session;
+
+fail_session_device_unregister:
+	device_unregister(&session->dev);
+	kobject_put(session->sysfs_entry);
+	/* Remaining cleanup will be done in vs_session_device_release */
+	return ERR_PTR(ret);
+fail_session_map:
+	vs_session_free_idr(session);
+fail_sysfs_entry:
+	kobject_put(session->sysfs_entry);
+fail_free_session:
+	kfree(session->name);
+	kfree(session);
+fail_session_alloc:
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(vs_session_register);
+
+void vs_session_start(struct vs_session_device *session)
+{
+	struct vs_service_device *core_service = session->core_service;
+
+	if (WARN_ON(!core_service))
+		return;
+
+	blocking_notifier_call_chain(&vs_session_notifier_list,
+			VS_SESSION_NOTIFY_ADD, session);
+
+	vs_service_start(core_service);
+}
+EXPORT_SYMBOL_GPL(vs_session_start);
+
+/**
+ * vs_session_unregister - unregister a session device
+ * @session: the session device to unregister
+ */
+void vs_session_unregister(struct vs_session_device *session)
+{
+	if (session->dev.parent)
+		sysfs_remove_link(&session->dev.kobj, VS_TRANSPORT_SYMLINK_NAME);
+	blocking_notifier_call_chain(&vs_session_notifier_list,
+			VS_SESSION_NOTIFY_REMOVE, session);
+
+	device_unregister(&session->dev);
+
+	kobject_put(session->sysfs_entry);
+}
+EXPORT_SYMBOL_GPL(vs_session_unregister);
+
+struct service_unbind_work_struct {
+	struct vs_service_device *service;
+	struct work_struct work;
+};
+
+static void service_unbind_work(struct work_struct *work)
+{
+	struct service_unbind_work_struct *unbind_work = container_of(work,
+			struct service_unbind_work_struct, work);
+
+	device_release_driver(&unbind_work->service->dev);
+
+	/* Matches vs_get_service() in vs_session_unbind_driver() */
+	vs_put_service(unbind_work->service);
+	kfree(unbind_work);
+}
+
+int vs_session_unbind_driver(struct vs_service_device *service)
+{
+	struct service_unbind_work_struct *unbind_work =
+			kmalloc(sizeof(*unbind_work), GFP_KERNEL);
+
+	if (!unbind_work)
+		return -ENOMEM;
+
+	INIT_WORK(&unbind_work->work, service_unbind_work);
+
+	/* Put in service_unbind_work() */
+	unbind_work->service = vs_get_service(service);
+	schedule_work(&unbind_work->work);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vs_session_unbind_driver);
+
+static int __init vservices_init(void)
+{
+	int r;
+
+	printk(KERN_INFO "vServices Framework 1.0\n");
+
+	vservices_root = kobject_create_and_add("vservices", NULL);
+	if (!vservices_root) {
+		r = -ENOMEM;
+		goto fail_create_root;
+	}
+
+	r = bus_register(&vs_session_bus_type);
+	if (r < 0)
+		goto fail_bus_register;
+
+	r = vs_devio_init();
+	if (r < 0)
+		goto fail_devio_init;
+
+	return 0;
+
+fail_devio_init:
+	bus_unregister(&vs_session_bus_type);
+fail_bus_register:
+	kobject_put(vservices_root);
+fail_create_root:
+	return r;
+}
+
+static void __exit vservices_exit(void)
+{
+	printk(KERN_INFO "vServices Framework exit\n");
+
+	vs_devio_exit();
+	bus_unregister(&vs_session_bus_type);
+	kobject_put(vservices_root);
+}
+
+subsys_initcall(vservices_init);
+module_exit(vservices_exit);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Session");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/session.h b/drivers/vservices/session.h
new file mode 100644
index 0000000..f51d535
--- /dev/null
+++ b/drivers/vservices/session.h
@@ -0,0 +1,173 @@
+/*
+ * drivers/vservices/session.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Definitions related to the vservices session bus and its client and server
+ * session drivers. The interfaces in this file are implementation details of
+ * the vServices framework and should not be used by transport or service
+ * drivers.
+ */
+
+#ifndef _VSERVICES_SESSION_PRIV_H_
+#define _VSERVICES_SESSION_PRIV_H_
+
+/* Maximum number of sessions allowed */
+#define VS_MAX_SESSIONS 64
+
+#include "debug.h"
+
+/* For use by the core server */
+#define VS_SERVICE_AUTO_ALLOCATE_ID	0xffff
+#define VS_SERVICE_ALREADY_RESET	1
+
+/*
+ * The upper bits of the service id are reserved for transport driver specific
+ * use. The reserve bits are always zeroed out above the transport layer.
+ */
+#define VS_SERVICE_ID_TRANSPORT_BITS	4
+#define VS_SERVICE_ID_TRANSPORT_OFFSET	12
+#define VS_SERVICE_ID_TRANSPORT_MASK ((1 << VS_SERVICE_ID_TRANSPORT_BITS) - 1)
+#define VS_SERVICE_ID_MASK \
+	(~(VS_SERVICE_ID_TRANSPORT_MASK << VS_SERVICE_ID_TRANSPORT_OFFSET))
+
+/* Number of bits needed to represent the service id range as a bitmap. */
+#define VS_SERVICE_ID_BITMAP_BITS \
+	(1 << ((sizeof(vs_service_id_t) * 8) - VS_SERVICE_ID_TRANSPORT_BITS))
+
+/* High service ids are reserved for use by the transport drivers */
+#define VS_SERVICE_ID_RESERVED(x) \
+	((1 << VS_SERVICE_ID_TRANSPORT_OFFSET) - (x))
+
+#define VS_SERVICE_ID_RESERVED_1	VS_SERVICE_ID_RESERVED(1)
+
+/* Name of the session device symlink in service device sysfs directory */
+#define VS_SESSION_SYMLINK_NAME		"session"
+
+/* Name of the transport device symlink in session device sysfs directory */
+#define VS_TRANSPORT_SYMLINK_NAME	"transport"
+
+static inline unsigned int
+vs_get_service_id_reserved_bits(vs_service_id_t service_id)
+{
+	return (service_id >> VS_SERVICE_ID_TRANSPORT_OFFSET) &
+			VS_SERVICE_ID_TRANSPORT_MASK;
+}
+
+static inline vs_service_id_t vs_get_real_service_id(vs_service_id_t service_id)
+{
+	return service_id & VS_SERVICE_ID_MASK;
+}
+
+static inline void vs_set_service_id_reserved_bits(vs_service_id_t *service_id,
+		unsigned int reserved_bits)
+{
+	*service_id &= ~(VS_SERVICE_ID_TRANSPORT_MASK <<
+			VS_SERVICE_ID_TRANSPORT_OFFSET);
+	*service_id |= (reserved_bits & VS_SERVICE_ID_TRANSPORT_MASK) <<
+			VS_SERVICE_ID_TRANSPORT_OFFSET;
+}
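+
+/*
+ * For example, with 4 transport bits at offset 12, a wire service id of
+ * 0x3005 encodes real service id 0x005 with reserved bits 0x3:
+ * vs_get_real_service_id() returns 0x005 and
+ * vs_get_service_id_reserved_bits() returns 0x3. The axon transport uses
+ * these bits to piggy-back freed-buffer acknowledgements on messages.
+ */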
+
+extern struct bus_type vs_session_bus_type;
+extern struct kobject *vservices_root;
+extern struct kobject *vservices_server_root;
+extern struct kobject *vservices_client_root;
+
+/**
+ * struct vs_session_driver - Session driver
+ * @driver: Linux device model driver structure
+ * @service_bus: Pointer to either the server or client bus type
+ * @is_server: True if this driver is for a server session, false if it is for
+ * a client session
+ * @service_added: Called when a non-core service is added.
+ * @service_start: Called when a non-core service is started.
+ * @service_local_reset: Called when an active non-core service driver becomes
+ * inactive.
+ * @service_removed: Called when a non-core service is removed.
+ */
+struct vs_session_driver {
+	struct device_driver driver;
+	struct bus_type *service_bus;
+	bool is_server;
+
+	/* These are all called with the core service state lock held. */
+	int (*service_added)(struct vs_session_device *session,
+			struct vs_service_device *service);
+	int (*service_start)(struct vs_session_device *session,
+			struct vs_service_device *service);
+	int (*service_local_reset)(struct vs_session_device *session,
+			struct vs_service_device *service);
+	int (*service_removed)(struct vs_session_device *session,
+			struct vs_service_device *service);
+};
+
+#define to_vs_session_driver(drv) \
+	container_of(drv, struct vs_session_driver, driver)
+
+/* Service lookup */
+extern struct vs_service_device *vs_session_get_service(
+		struct vs_session_device *session,
+		vs_service_id_t service_id);
+
+/* Service creation & destruction */
+extern struct vs_service_device *
+vs_service_register(struct vs_session_device *session,
+		struct vs_service_device *parent,
+		vs_service_id_t service_id,
+		const char *protocol,
+		const char *name,
+		const void *plat_data);
+
+extern bool vs_service_start(struct vs_service_device *service);
+
+extern int vs_service_delete(struct vs_service_device *service,
+		struct vs_service_device *caller);
+
+extern int vs_service_handle_delete(struct vs_service_device *service);
+
+/* Service reset handling */
+extern int vs_service_handle_reset(struct vs_session_device *session,
+		vs_service_id_t service_id, bool disable);
+extern int vs_service_enable(struct vs_service_device *service);
+
+extern void vs_session_enable_noncore(struct vs_session_device *session);
+extern void vs_session_disable_noncore(struct vs_session_device *session);
+extern void vs_session_delete_noncore(struct vs_session_device *session);
+
+/* Service bus driver management */
+extern int vs_service_bus_probe(struct device *dev);
+extern int vs_service_bus_remove(struct device *dev);
+extern int vs_service_bus_uevent(struct device *dev,
+		struct kobj_uevent_env *env);
+
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+
+extern int vs_devio_init(void);
+extern void vs_devio_exit(void);
+
+extern struct vs_service_device *vs_service_lookup_by_devt(dev_t dev);
+
+extern struct vs_service_driver vs_devio_server_driver;
+extern struct vs_service_driver vs_devio_client_driver;
+
+extern int vservices_cdev_major;
+
+#else /* !CONFIG_VSERVICES_CHAR_DEV */
+
+static inline int vs_devio_init(void)
+{
+	return 0;
+}
+
+static inline void vs_devio_exit(void)
+{
+}
+
+#endif /* !CONFIG_VSERVICES_CHAR_DEV */
+
+#endif /* _VSERVICES_SESSION_PRIV_H_ */
diff --git a/drivers/vservices/skeleton_driver.c b/drivers/vservices/skeleton_driver.c
new file mode 100644
index 0000000..cfbc5df
--- /dev/null
+++ b/drivers/vservices/skeleton_driver.c
@@ -0,0 +1,133 @@
+/*
+ * drivers/vservices/skeleton_driver.c
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Skeleton testing driver for templating vService client/server drivers
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include <vservices/session.h>
+#include <vservices/buffer.h>
+#include <vservices/service.h>
+
+struct skeleton_info {
+	unsigned dummy;
+};
+
+static void vs_skeleton_handle_start(struct vs_service_device *service)
+{
+	/* NOTE: Do not change this message - it is used for system testing */
+	dev_info(&service->dev, "skeleton handle_start\n");
+}
+
+static int vs_skeleton_handle_message(struct vs_service_device *service,
+					  struct vs_mbuf *mbuf)
+{
+	dev_info(&service->dev, "skeleton handle_messasge\n");
+	return -EBADMSG;
+}
+
+static void vs_skeleton_handle_notify(struct vs_service_device *service,
+					  u32 flags)
+{
+	dev_info(&service->dev, "skeleton handle_notify\n");
+}
+
+static void vs_skeleton_handle_reset(struct vs_service_device *service)
+{
+	dev_info(&service->dev, "skeleton handle_reset %s service %d\n",
+			service->is_server ? "server" : "client", service->id);
+}
+
+static int vs_skeleton_probe(struct vs_service_device *service)
+{
+	struct skeleton_info *info;
+	int err = -ENOMEM;
+
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		goto fail;
+
+	dev_set_drvdata(&service->dev, info);
+	return 0;
+
+fail:
+	return err;
+}
+
+static int vs_skeleton_remove(struct vs_service_device *service)
+{
+	struct skeleton_info *info = dev_get_drvdata(&service->dev);
+
+	dev_info(&service->dev, "skeleton remove\n");
+	kfree(info);
+	return 0;
+}
+
+static struct vs_service_driver server_skeleton_driver = {
+	.protocol	= "com.ok-labs.skeleton",
+	.is_server	= true,
+	.probe		= vs_skeleton_probe,
+	.remove		= vs_skeleton_remove,
+	.start		= vs_skeleton_handle_start,
+	.receive	= vs_skeleton_handle_message,
+	.notify		= vs_skeleton_handle_notify,
+	.reset		= vs_skeleton_handle_reset,
+	.driver		= {
+		.name		= "vs-server-skeleton",
+		.owner		= THIS_MODULE,
+		.bus		= &vs_server_bus_type,
+	},
+};
+
+static struct vs_service_driver client_skeleton_driver = {
+	.protocol	= "com.ok-labs.skeleton",
+	.is_server	= false,
+	.probe		= vs_skeleton_probe,
+	.remove		= vs_skeleton_remove,
+	.start		= vs_skeleton_handle_start,
+	.receive	= vs_skeleton_handle_message,
+	.notify		= vs_skeleton_handle_notify,
+	.reset		= vs_skeleton_handle_reset,
+	.driver		= {
+		.name		= "vs-client-skeleton",
+		.owner		= THIS_MODULE,
+		.bus		= &vs_client_bus_type,
+	},
+};
+
+static int __init vs_skeleton_init(void)
+{
+	int ret;
+
+	ret = driver_register(&server_skeleton_driver.driver);
+	if (ret)
+		return ret;
+
+	ret = driver_register(&client_skeleton_driver.driver);
+	if (ret)
+		driver_unregister(&server_skeleton_driver.driver);
+
+	return ret;
+}
+
+static void __exit vs_skeleton_exit(void)
+{
+	driver_unregister(&server_skeleton_driver.driver);
+	driver_unregister(&client_skeleton_driver.driver);
+}
+
+module_init(vs_skeleton_init);
+module_exit(vs_skeleton_exit);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Skeleton Client/Server Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/transport.h b/drivers/vservices/transport.h
new file mode 100644
index 0000000..8e5055c
--- /dev/null
+++ b/drivers/vservices/transport.h
@@ -0,0 +1,40 @@
+/*
+ * drivers/vservices/transport.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file defines the private interface that vServices transport drivers
+ * must provide to the vservices session and protocol layers. The transport,
+ * transport vtable, and message buffer structures are defined in the public
+ * <vservices/transport.h> header.
+ */
+
+#ifndef _VSERVICES_TRANSPORT_PRIV_H_
+#define _VSERVICES_TRANSPORT_PRIV_H_
+
+#include <linux/types.h>
+#include <linux/list.h>
+
+#include <vservices/transport.h>
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+
+/**
+ * struct vs_notify_info - Notification information stored in the transport
+ * @service_id: Service id for this notification info
+ * @offset: Offset into the notification mapping
+ */
+struct vs_notify_info {
+	vs_service_id_t service_id;
+	unsigned offset;
+};
+
+#define VS_MAX_SERVICES		128
+#define VS_MAX_SERVICE_ID	(VS_MAX_SERVICES - 1)
+
+#endif /* _VSERVICES_TRANSPORT_PRIV_H_ */
diff --git a/drivers/vservices/transport/Kconfig b/drivers/vservices/transport/Kconfig
new file mode 100644
index 0000000..37e84c4
--- /dev/null
+++ b/drivers/vservices/transport/Kconfig
@@ -0,0 +1,20 @@
+#
+# vServices Transport driver configuration
+#
+
+menu "Transport drivers"
+
+config VSERVICES_OKL4_AXON
+	tristate "OKL4 Microvisor Axon driver"
+	depends on VSERVICES_SUPPORT && OKL4_GUEST
+	default y
+	help
+	  This option adds support for Virtual Services sessions using an OKL4
+	  Microvisor Axon object as a transport.
+
+	  If this driver is to be used in a Cell that has multiple
+	  discontiguous regions in its physical memory pool, the
+	  CONFIG_DMA_CMA option must also be selected (or CONFIG_CMA
+	  in older kernels that do not have CONFIG_DMA_CMA).
+
+endmenu
diff --git a/drivers/vservices/transport/Makefile b/drivers/vservices/transport/Makefile
new file mode 100644
index 0000000..222fb51
--- /dev/null
+++ b/drivers/vservices/transport/Makefile
@@ -0,0 +1,5 @@
+ccflags-y += -Werror
+ccflags-$(CONFIG_VSERVICES_DEBUG) += -DDEBUG
+
+obj-$(CONFIG_VSERVICES_OKL4_AXON)	+= vtransport_axon.o
+vtransport_axon-objs = axon.o
diff --git a/drivers/vservices/transport/axon.c b/drivers/vservices/transport/axon.c
new file mode 100644
index 0000000..e3fcb23
--- /dev/null
+++ b/drivers/vservices/transport/axon.c
@@ -0,0 +1,3549 @@
+/*
+ * drivers/vservices/transport/axon.c
+ *
+ * Copyright (c) 2015-2018 General Dynamics
+ * Copyright (c) 2015 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This is the OKL4 Virtual Services transport driver for OKL4 Microvisor
+ * Axons (virtual inter-Cell DMA engines).
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/jiffies.h>
+#include <linux/log2.h>
+#include <linux/version.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/dma-contiguous.h>
+#include <linux/vmalloc.h>
+#include <linux/mmzone.h>
+#include <asm-generic/okl4_virq.h>
+#include <asm/byteorder.h>
+
+#include <vservices/transport.h>
+#include <vservices/session.h>
+#include <vservices/service.h>
+
+#include <microvisor/microvisor.h>
+
+#include "../transport.h"
+#include "../session.h"
+#include "../debug.h"
+
+#define DRIVER_AUTHOR "Cog Systems Pty Ltd"
+#define DRIVER_DESC "OKL4 vServices Axon Transport Driver"
+#define DRIVER_NAME "vtransport_axon"
+
+#define smp_mb__before_atomic_dec smp_mb__before_atomic
+#define smp_mb__before_atomic_inc smp_mb__before_atomic
+#define smp_mb__after_atomic_dec smp_mb__after_atomic
+
+#define DMA_ATTRS unsigned long
+
+static struct kmem_cache *mbuf_cache;
+
+struct child_device {
+	struct device *dev;
+	struct list_head list;
+};
+
+/* Number of services in the transport array to allocate at a time */
+#define SERVICES_ALLOC_CHUNK	16
+#define MSG_SEND_FREE_BUFS	VS_SERVICE_ID_RESERVED_1
+
+/* The maximum value we allow for the free_bufs_balance counter */
+#define MAX_BALANCE		1
+
+/*
+ * The free bufs quota must be enough to take free_bufs_balance from its
+ * minimum to its maximum.
+ */
+#define FREE_BUFS_QUOTA		(MAX_BALANCE * 2)
+
+/*
+ * The free bufs retry delay is the period in jiffies that we delay retrying
+ * after an out-of-memory condition when trying to send a free bufs message.
+ */
+#define FREE_BUFS_RETRY_DELAY	2
+
+/* The minimum values we permit for queue and message size. */
+#define MIN_QUEUE_SIZE		((size_t)4)
+#define MIN_MSG_SIZE		(32 - sizeof(vs_service_id_t))
+
+/*
+ * The maximum size for a batched receive. This should be larger than the
+ * maximum message size, and large enough to avoid excessive context switching
+ * overheads, yet small enough to avoid blocking the tasklet queue for too
+ * long.
+ */
+#define MAX_TRANSFER_CHUNK	65536
+
+#define INC_MOD(x, m) do {					\
+	(x)++;							\
+	if ((x) == (m))						\
+		(x) = 0;					\
+} while (0)
+
+/* Local Axon cleanup workqueue */
+struct workqueue_struct *work_queue;
+
+/*
+ * True if there is only one physical segment being used for kernel memory
+ * allocations. If this is false, the device must have a usable CMA region.
+ */
+static bool okl4_single_physical_segment;
+
+/* OKL4 MMU capability. */
+static okl4_kcap_t okl4_mmu_cap;
+
+/*
+ * Per-service TX buffer allocation pool.
+ *
+ * We cannot use a normal DMA pool for TX buffers, because alloc_mbuf can be
+ * called with GFP_ATOMIC, and a normal DMA pool alloc will take pages from
+ * a global emergency pool if GFP_WAIT is not set. The emergency pool is not
+ * guaranteed to be in the same physical segment as this device's DMA region,
+ * so it might not be usable by the axon.
+ *
+ * Using a very simple allocator with preallocated memory also speeds up the
+ * TX path.
+ *
+ * RX buffers use a standard Linux DMA pool, shared between all services,
+ * rather than this struct. They are preallocated by definition, so the speed
+ * of the allocator doesn't matter much for them. Also, they're always
+ * allocated with GFP_KERNEL (which includes GFP_WAIT) so the normal DMA pool
+ * will use memory from the axon's contiguous region.
+ */
+struct vs_axon_tx_pool {
+	struct vs_transport_axon *transport;
+	struct kref kref;
+
+	void *base_vaddr;
+	dma_addr_t base_laddr;
+
+	unsigned alloc_order;
+	unsigned count;
+
+	struct work_struct free_work;
+	unsigned long alloc_bitmap[];
+};
+
+struct vs_axon_rx_freelist_entry {
+	struct list_head list;
+	dma_addr_t laddr;
+};
+
+/* Service info */
+struct vs_mv_service_info {
+	struct vs_service_device *service;
+
+	/* True if the session has started the service */
+	bool ready;
+
+	/* Number of send buffers we have allocated, in total. */
+	atomic_t send_inflight;
+
+	/*
+	 * Number of send buffers we have allocated but not yet sent.
+	 * This should always be zero if ready is false.
+	 */
+	atomic_t send_alloc;
+
+	/*
+	 * Number of receive buffers we have received and not yet freed.
+	 * This should always be zero if ready is false.
+	 */
+	atomic_t recv_inflight;
+
+	/*
+	 * Number of receive buffers we have freed, but not told the other end
+	 * about yet.
+	 *
+	 * The watermark is the maximum number of freed buffers we can
+	 * accumulate before we send a dummy message to the remote end to ack
+	 * them. This is used in situations where the protocol allows the remote
+	 * end to reach its send quota without guaranteeing a reply; the dummy
+	 * message lets it make progress even if our service driver doesn't send
+	 * an answer that we can piggy-back the acks on.
+	 */
+	atomic_t recv_freed;
+	unsigned int recv_freed_watermark;
+
+	/*
+	 * Number of buffers that have been left allocated after a reset. If
+	 * this count is nonzero, then the service has been disabled by the
+	 * session layer, and needs to be re-enabled when it reaches zero.
+	 */
+	atomic_t outstanding_frees;
+
+	/* TX allocation pool */
+	struct vs_axon_tx_pool *tx_pool;
+
+	/* RX allocation count */
+	unsigned rx_allocated;
+
+	/* Reference count for this info struct. */
+	struct kref kref;
+
+	/* RCU head for cleanup */
+	struct rcu_head rcu_head;
+};
+
+/*
+ * Transport readiness state machine
+ *
+ * This is similar to the service readiness state machine, but simpler,
+ * because there are fewer transition triggers.
+ *
+ * The states are:
+ * INIT: Initial state. This occurs transiently during probe.
+ * LOCAL_RESET: We have initiated a reset at this end, but the remote end has
+ * not yet acknowledged it. We will enter the RESET state on receiving
+ * acknowledgement.
+ * RESET: The transport is inactive at both ends, and the session layer has
+ * not yet told us to start activating.
+ * LOCAL_READY: The session layer has told us to start activating, and we
+ * have notified the remote end that we're ready.
+ * REMOTE_READY: The remote end has notified us that it is ready, but the
+ * local session layer hasn't decided to become ready yet.
+ * ACTIVE: Both ends are ready to communicate.
+ * SHUTDOWN: The transport is shutting down and should not become ready.
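+ *
+ * A typical activation therefore runs RESET -> LOCAL_READY -> ACTIVE on
+ * the end whose session layer becomes ready first, and RESET ->
+ * REMOTE_READY -> ACTIVE on its peer.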
+ */
+enum vs_transport_readiness {
+	VS_TRANSPORT_INIT = 0,
+	VS_TRANSPORT_LOCAL_RESET,
+	VS_TRANSPORT_RESET,
+	VS_TRANSPORT_LOCAL_READY,
+	VS_TRANSPORT_REMOTE_READY,
+	VS_TRANSPORT_ACTIVE,
+	VS_TRANSPORT_SHUTDOWN,
+};
+
+/*
+ * Transport reset / ready VIRQ payload bits
+ */
+enum vs_transport_reset_virq {
+	VS_TRANSPORT_VIRQ_RESET_REQ = (1 << 0),
+	VS_TRANSPORT_VIRQ_RESET_ACK = (1 << 1),
+	VS_TRANSPORT_VIRQ_READY = (1 << 2),
+};
+
+/*
+ * Internal definitions of the transport and message buffer structures.
+ */
+#define MAX_NOTIFICATION_LINES 16 /* Enough for 512 notifications each way */
+
+struct vs_transport_axon {
+	struct device *axon_dev;
+
+	struct okl4_axon_tx *tx;
+	struct okl4_axon_queue_entry *tx_descs;
+	struct vs_axon_tx_pool **tx_pools;
+	struct okl4_axon_rx *rx;
+	struct okl4_axon_queue_entry *rx_descs;
+	void **rx_ptrs;
+
+	dma_addr_t tx_phys, rx_phys;
+	size_t tx_size, rx_size;
+
+	okl4_kcap_t segment;
+	okl4_laddr_t segment_base;
+
+	okl4_kcap_t tx_cap, rx_cap, reset_cap;
+	unsigned int tx_irq, rx_irq, reset_irq;
+	okl4_interrupt_number_t reset_okl4_irq;
+
+	unsigned int notify_tx_nirqs;
+	okl4_kcap_t notify_cap[MAX_NOTIFICATION_LINES];
+	unsigned int notify_rx_nirqs;
+	unsigned int notify_irq[MAX_NOTIFICATION_LINES];
+
+	bool is_server;
+	size_t msg_size, queue_size;
+
+	/*
+	 * The handle to the device tree node for the virtual-session node
+	 * associated with the axon.
+	 */
+	struct device_node *of_node;
+
+	struct list_head child_dev_list;
+
+	/*
+	 * Hold queue and tx tasklet used to buffer and resend mbufs blocked
+	 * by a full outgoing axon queue, due to a slow receiver or a halted
+	 * axon.
+	 */
+	struct list_head tx_queue;
+	struct tasklet_struct tx_tasklet;
+	u32 tx_uptr_freed;
+
+	/*
+	 * The readiness state of the transport, and a spinlock protecting it.
+	 * Note that this is different to the session's readiness state
+	 * machine, though it has the same basic purpose.
+	 */
+	enum vs_transport_readiness readiness;
+	spinlock_t readiness_lock;
+
+	struct tasklet_struct rx_tasklet;
+	struct timer_list rx_retry_timer;
+	struct list_head rx_freelist;
+	u32 rx_alloc_extra;
+	struct dma_pool *rx_pool;
+	spinlock_t rx_alloc_lock;
+	u32 rx_uptr_allocated;
+
+	struct vs_session_device *session_dev;
+	struct vs_transport transport;
+
+	DECLARE_BITMAP(service_bitmap, VS_SERVICE_ID_BITMAP_BITS);
+
+	struct delayed_work free_bufs_work;
+
+	/*
+	 * Freed buffers messages balance counter. This counter is incremented
+	 * when we send a freed buffers message and decremented when we receive
+	 * one. If the balance is negative then we need to send a message
+	 * as an acknowledgement to the other end, even if there are no
+	 * freed buffers to acknowledge.
+	 */
+	atomic_t free_bufs_balance;
+
+	/*
+	 * Flag set when a service exceeds its freed buffers watermark,
+	 * telling free_bufs_work to send a message when the balance
+	 * counter is non-negative. This is ignored, and a message is
+	 * sent in any case, if the balance is negative.
+	 */
+	bool free_bufs_pending;
+
+	/* Pool for allocating outgoing free bufs messages */
+	struct vs_axon_tx_pool *free_bufs_pool;
+};
+
+#define to_vs_transport_axon(t) \
+	container_of(t, struct vs_transport_axon, transport)
+
+struct vs_mbuf_axon {
+	struct vs_mbuf base;
+	struct vs_transport_axon *owner;
+	dma_addr_t laddr;
+	struct vs_axon_tx_pool *pool;
+};
+
+#define to_vs_mbuf_axon(b) container_of(b, struct vs_mbuf_axon, base)
+
+/*
+ * Buffer allocation
+ *
+ * Buffers used by axons must be allocated within a single contiguous memory
+ * region, backed by a single OKL4 physical segment. This is similar to how
+ * the DMA allocator normally works, but we can't use the normal DMA allocator
+ * because the platform code will remap the allocated memory with caching
+ * disabled.
+ *
+ * We borrow the useful parts of the DMA allocator by providing our own DMA
+ * mapping ops which don't actually remap the memory.
+ */
+static void *axon_dma_alloc(struct device *dev, size_t size,
+		dma_addr_t *handle, gfp_t gfp, DMA_ATTRS attrs)
+{
+	unsigned long order;
+	size_t count;
+	struct page *page;
+	void *ptr;
+
+#ifdef DMA_ERROR_CODE
+	*handle = DMA_ERROR_CODE;
+#else
+	*handle = 0;
+#endif
+	size = PAGE_ALIGN(size);
+
+	if (!(gfp & __GFP_RECLAIM))
+		return NULL;
+
+	order = get_order(size);
+	count = size >> PAGE_SHIFT;
+
+	if (dev_get_cma_area(dev)) {
+		page = dma_alloc_from_contiguous(dev, count, order, gfp);
+		if (!page)
+			return NULL;
+	} else {
+		struct page *p, *e;
+		page = alloc_pages(gfp, order);
+
+		if (!page)
+			return NULL;
+
+		/* Split huge page and free any excess pages */
+		split_page(page, order);
+		for (p = page + count, e = page + (1 << order); p < e; p++)
+			__free_page(p);
+	}
+
+	if (PageHighMem(page)) {
+		struct vm_struct *area = get_vm_area(size, VM_USERMAP);
+		if (!area)
+			goto free_pages;
+		ptr = area->addr;
+		area->phys_addr = __pfn_to_phys(page_to_pfn(page));
+
+		if (ioremap_page_range((unsigned long)ptr,
+					(unsigned long)ptr + size,
+					area->phys_addr, PAGE_KERNEL)) {
+			vunmap(ptr);
+			goto free_pages;
+		}
+	} else {
+		ptr = page_address(page);
+	}
+
+	*handle = (dma_addr_t)page_to_pfn(page) << PAGE_SHIFT;
+
+	dev_dbg(dev, "dma_alloc: %#tx bytes at %pK (%#llx), %s cma, %s high\n",
+			size, ptr, (long long)*handle,
+			dev_get_cma_area(dev) ? "is" : "not",
+			PageHighMem(page) ? "is" : "not");
+
+	return ptr;
+
+free_pages:
+	if (dev_get_cma_area(dev)) {
+		dma_release_from_contiguous(dev, page, count);
+	} else {
+		struct page *e = page + count;
+
+		while (page < e) {
+			__free_page(page);
+			page++;
+		}
+	}
+
+	return NULL;
+}
+
+static void axon_dma_free(struct device *dev, size_t size, void *cpu_addr,
+		dma_addr_t handle, DMA_ATTRS attrs)
+{
+	struct page *page = pfn_to_page(handle >> PAGE_SHIFT);
+
+	size = PAGE_ALIGN(size);
+
+	if (PageHighMem(page)) {
+		unmap_kernel_range((unsigned long)cpu_addr, size);
+		vunmap(cpu_addr);
+	}
+
+	if (dev_get_cma_area(dev)) {
+		dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
+	} else {
+		struct page *e = page + (size >> PAGE_SHIFT);
+
+		while (page < e) {
+			__free_page(page);
+			page++;
+		}
+	}
+}
+
+struct dma_map_ops axon_dma_ops = {
+	.alloc		= axon_dma_alloc,
+	.free		= axon_dma_free,
+};
+
+/*
+ * Quotas
+ * ------
+ *
+ * Each service has two quotas, one for send and one for receive. The
+ * send quota is incremented when we allocate an mbuf. The send quota
+ * is decremented by receiving a freed buffer ack from the remote
+ * end, either in the reserved bits of the service id or in a special
+ * free bufs message.
+ *
+ * The receive quota is incremented whenever we receive a message and
+ * decremented when we free the mbuf. Exceeding the receive quota
+ * indicates a driver bug: the other end's send quota should have
+ * prevented it from sending the offending message, so the two ends
+ * must be disagreeing about the quotas. If this happens, a warning is
+ * printed and the offending service is reset.
+ */
+
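+/*
+ * For example (illustrative), with send_quota == 2: two successful
+ * transport_alloc_mbuf() calls take send_inflight to 2, and further
+ * allocations fail with -ENOBUFS until the remote end acknowledges a
+ * freed buffer (via the reserved bits of an incoming service id, or a
+ * free bufs message). reduce_send_quota() then drops send_inflight back
+ * below the quota and may let the service's tx_ready handler run again.
+ */
+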
+/*
+ * The base of the mbuf has the destination service id, but we pass the
+ * data pointer starting after the service id. The following helper
+ * functions are used to avoid ugly pointer arithmetic when handling
+ * mbufs.
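+ *
+ * Illustrative layout of a single buffer:
+ *
+ *   mbuf_real_base()                mbuf->base.data
+ *         |                               |
+ *         v                               v
+ *         +-------------------------------+--------------------------+
+ *         | vs_service_id_t (dest id plus | payload, mbuf->base.size |
+ *         | freed-buffer ack bits)        | bytes                    |
+ *         +-------------------------------+--------------------------+
+ *
+ *   mbuf_real_size() == mbuf->base.size + sizeof(vs_service_id_t)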
+ */
+static size_t mbuf_real_size(struct vs_mbuf_axon *mbuf)
+{
+	return mbuf->base.size + sizeof(vs_service_id_t);
+}
+
+static void *mbuf_real_base(struct vs_mbuf_axon *mbuf)
+{
+	return mbuf->base.data - sizeof(vs_service_id_t);
+}
+
+/*
+ * Get the service_id and reserved bits from a message buffer and then
+ * clear the reserved bits so the upper layers don't see them.
+ */
+vs_service_id_t
+transport_get_mbuf_service_id(struct vs_transport_axon *transport,
+		void *data, unsigned int *freed_acks)
+{
+	unsigned int reserved_bits;
+	vs_service_id_t id;
+
+	/* Get the real service id and reserved bits */
+	id = *(vs_service_id_t *)data;
+	reserved_bits = vs_get_service_id_reserved_bits(id);
+	id = vs_get_real_service_id(id);
+
+	/* Clear the reserved bits in the service id */
+	vs_set_service_id_reserved_bits(&id, 0);
+	if (freed_acks) {
+		*(vs_service_id_t *)data = id;
+		*freed_acks = reserved_bits;
+	}
+	return id;
+}
+
+static void
+__transport_get_service_info(struct vs_mv_service_info *service_info)
+{
+	kref_get(&service_info->kref);
+}
+
+static struct vs_mv_service_info *
+transport_get_service_info(struct vs_service_device *service)
+{
+	struct vs_mv_service_info *service_info;
+
+	rcu_read_lock();
+	service_info = rcu_dereference(service->transport_priv);
+	if (service_info)
+		__transport_get_service_info(service_info);
+	rcu_read_unlock();
+
+	return service_info;
+}
+
+static struct vs_mv_service_info *
+transport_get_service_id_info(struct vs_transport_axon *transport,
+		vs_service_id_t service_id)
+{
+	struct vs_service_device *service;
+	struct vs_mv_service_info *service_info;
+
+	service = vs_session_get_service(transport->session_dev, service_id);
+	if (!service)
+		return NULL;
+
+	service_info = transport_get_service_info(service);
+
+	vs_put_service(service);
+	return service_info;
+}
+
+static void transport_info_free(struct rcu_head *rcu_head)
+{
+	struct vs_mv_service_info *service_info =
+		container_of(rcu_head, struct vs_mv_service_info, rcu_head);
+
+	vs_put_service(service_info->service);
+	kfree(service_info);
+}
+
+static void transport_info_release(struct kref *kref)
+{
+	struct vs_mv_service_info *service_info =
+		container_of(kref, struct vs_mv_service_info, kref);
+
+	call_rcu(&service_info->rcu_head, transport_info_free);
+}
+
+static void transport_put_service_info(struct vs_mv_service_info *service_info)
+{
+	kref_put(&service_info->kref, transport_info_release);
+}
+
+static bool transport_axon_reset(struct vs_transport_axon *transport);
+
+static void transport_fatal_error(struct vs_transport_axon *transport,
+		const char *msg)
+{
+	dev_err(transport->axon_dev, "Fatal transport error (%s); resetting\n",
+			msg);
+#ifdef DEBUG
+	dump_stack();
+#endif
+	transport_axon_reset(transport);
+}
+
+static unsigned int reduce_send_quota(struct vs_transport_axon *transport,
+		struct vs_mv_service_info *service_info, unsigned int count,
+		bool allow_tx_ready)
+{
+	int new_inflight, send_alloc;
+	bool was_over_quota, is_over_quota;
+
+        /* FIXME: Redmine issue #1303 - philip. */
+	spin_lock_irq(&transport->readiness_lock);
+	/*
+	 * We read the current send_alloc for error checking *before*
+	 * decrementing send_inflight. This avoids any false positives
+	 * due to send_alloc being incremented by a concurrent alloc_mbuf.
+	 *
+	 * Note that there is an implicit smp_mb() before atomic_sub_return(),
+	 * matching the explicit one in alloc_mbuf.
+	 */
+	send_alloc = atomic_read(&service_info->send_alloc);
+	new_inflight = atomic_sub_return(count, &service_info->send_inflight);
+
+	spin_unlock_irq(&transport->readiness_lock);
+	if (WARN_ON(new_inflight < send_alloc)) {
+		dev_err(transport->axon_dev,
+				"inflight sent messages for service %d is less than the number of allocated messages (%d < %d, was reduced by %d)\n",
+				service_info->service->id, new_inflight,
+				send_alloc, count);
+		transport_fatal_error(transport, "sent msg count underrun");
+		return 0;
+	}
+
+	was_over_quota = (new_inflight + count >=
+			service_info->service->send_quota);
+	is_over_quota = (new_inflight > service_info->service->send_quota);
+
+	vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+			transport->axon_dev,
+			"Service %d quota %d -> %d (over_quota: %d -> %d)\n",
+			service_info->service->id, new_inflight + count,
+			new_inflight, was_over_quota, is_over_quota);
+
+	/*
+	 * Notify the service that a buffer has been freed. We call tx_ready
+	 * if this is a notification from the remote end (i.e. not an unsent
+	 * buffer) and the quota has just dropped below the maximum.
+	 */
+	vs_session_quota_available(transport->session_dev,
+			service_info->service->id, count,
+			!is_over_quota && was_over_quota && allow_tx_ready);
+
+	return count;
+}
+
+static void __transport_tx_pool_free(struct vs_axon_tx_pool *pool,
+		dma_addr_t laddr);
+
+static void
+__transport_tx_cleanup(struct vs_transport_axon *transport)
+{
+	u32 uptr;
+	struct okl4_axon_queue_entry *desc;
+
+	lockdep_assert_held(&transport->readiness_lock);
+
+	uptr = transport->tx_uptr_freed;
+	desc = &transport->tx_descs[uptr];
+
+	while (!okl4_axon_data_info_getpending(&desc->info)) {
+		if (!transport->tx_pools[uptr])
+			break;
+
+		__transport_tx_pool_free(transport->tx_pools[uptr],
+				okl4_axon_data_info_getladdr(&desc->info));
+		transport->tx_pools[uptr] = NULL;
+
+		INC_MOD(uptr, transport->tx->queues[0].entries);
+		desc = &transport->tx_descs[uptr];
+		transport->tx_uptr_freed = uptr;
+	}
+}
+
+static void
+transport_axon_free_tx_pool(struct work_struct *work)
+{
+	struct vs_axon_tx_pool *pool = container_of(work,
+			struct vs_axon_tx_pool, free_work);
+	struct vs_transport_axon *transport = pool->transport;
+
+	dmam_free_coherent(transport->axon_dev,
+			pool->count << pool->alloc_order,
+			pool->base_vaddr, pool->base_laddr);
+	devm_kfree(transport->axon_dev, pool);
+}
+
+static void
+transport_axon_queue_free_tx_pool(struct kref *kref)
+{
+	struct vs_axon_tx_pool *pool = container_of(kref,
+			struct vs_axon_tx_pool, kref);
+
+	/*
+	 * Put the task on the axon local work queue for running in
+	 * a context where IRQ is enabled.
+	 */
+	INIT_WORK(&pool->free_work, transport_axon_free_tx_pool);
+	queue_work(work_queue, &pool->free_work);
+}
+
+static void
+transport_axon_put_tx_pool(struct vs_axon_tx_pool *pool)
+{
+	kref_put(&pool->kref, transport_axon_queue_free_tx_pool);
+}
+
+/* Low-level tx buffer allocation, without quota tracking. */
+static struct vs_mbuf_axon *
+__transport_alloc_mbuf(struct vs_transport_axon *transport,
+		vs_service_id_t service_id, struct vs_axon_tx_pool *pool,
+		size_t size, gfp_t gfp_flags)
+{
+	size_t real_size = size + sizeof(vs_service_id_t);
+	struct vs_mbuf_axon *mbuf;
+	unsigned index;
+
+	if (WARN_ON(real_size > (1 << pool->alloc_order))) {
+		dev_err(transport->axon_dev, "Message too big (%zu > %zu)\n",
+				real_size, (size_t)1 << pool->alloc_order);
+		goto fail_message_size;
+	}
+
+	kref_get(&pool->kref);
+
+	do {
+		index = find_first_zero_bit(pool->alloc_bitmap, pool->count);
+		if (unlikely(index >= pool->count)) {
+			/*
+			 * No buffers left. This can't be an out-of-quota
+			 * situation, because we've already checked the quota;
+			 * it must be because there's a buffer left over in
+			 * the tx queue. Clean out the tx queue and retry.
+			 */
+			spin_lock_irq(&transport->readiness_lock);
+			__transport_tx_cleanup(transport);
+			spin_unlock_irq(&transport->readiness_lock);
+
+			index = find_first_zero_bit(pool->alloc_bitmap,
+					pool->count);
+		}
+		if (unlikely(index >= pool->count))
+			goto fail_buffer_alloc;
+	} while (unlikely(test_and_set_bit_lock(index, pool->alloc_bitmap)));
+
+	mbuf = kmem_cache_alloc(mbuf_cache, gfp_flags & ~GFP_ZONEMASK);
+	if (!mbuf)
+		goto fail_mbuf_alloc;
+
+	mbuf->base.is_recv = false;
+	mbuf->base.data = pool->base_vaddr + (index << pool->alloc_order);
+	mbuf->base.size = size;
+	mbuf->owner = transport;
+	mbuf->laddr = pool->base_laddr + (index << pool->alloc_order);
+	mbuf->pool = pool;
+
+	/*
+	 * We put the destination service id in the mbuf, but increment the
+	 * data pointer past it so the receiver doesn't always need to skip
+	 * the service id.
+	 */
+	*(vs_service_id_t *)mbuf->base.data = service_id;
+	mbuf->base.data += sizeof(vs_service_id_t);
+
+	return mbuf;
+
+fail_mbuf_alloc:
+	clear_bit_unlock(index, pool->alloc_bitmap);
+fail_buffer_alloc:
+	transport_axon_put_tx_pool(pool);
+fail_message_size:
+	return NULL;
+}
+
+/* Allocate a tx buffer for a specified service. */
+static struct vs_mbuf *transport_alloc_mbuf(struct vs_transport *_transport,
+		struct vs_service_device *service, size_t size, gfp_t gfp_flags)
+{
+	struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+	size_t real_size = size + sizeof(vs_service_id_t);
+	struct vs_mv_service_info *service_info = NULL;
+	struct vs_mbuf_axon *mbuf;
+	vs_service_id_t service_id = service->id;
+
+	if (real_size > transport->msg_size) {
+		dev_err(transport->axon_dev, "Message too big (%zu > %zu)\n",
+				real_size, transport->msg_size);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (WARN_ON(service_id == MSG_SEND_FREE_BUFS))
+		return ERR_PTR(-ENXIO);
+
+	service_info = transport_get_service_info(service);
+	if (WARN_ON(!service_info))
+		return ERR_PTR(-EINVAL);
+
+	if (!service_info->tx_pool) {
+		transport_put_service_info(service_info);
+		return ERR_PTR(-ECONNRESET);
+	}
+
+	if (!atomic_add_unless(&service_info->send_inflight, 1,
+			service_info->service->send_quota)) {
+		/* Service has reached its quota */
+		vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+				transport->axon_dev,
+				"Service %d is at max send quota %d\n",
+				service_id, service_info->service->send_quota);
+		transport_put_service_info(service_info);
+		return ERR_PTR(-ENOBUFS);
+	}
+
+	/*
+	 * Increment the count of allocated but unsent mbufs. This is done
+	 * *after* the send_inflight increment (with a barrier to enforce
+	 * ordering) to ensure that send_inflight is never less than
+	 * send_alloc - see reduce_send_quota().
+	 */
+	smp_mb__before_atomic_inc();
+	atomic_inc(&service_info->send_alloc);
+
+	mbuf = __transport_alloc_mbuf(transport, service_id,
+			service_info->tx_pool, size, gfp_flags);
+	if (!mbuf) {
+		/*
+		 * Failed to allocate a buffer - decrement our quota back to
+		 * where it was.
+		 */
+		atomic_dec(&service_info->send_alloc);
+		smp_mb__after_atomic_dec();
+		atomic_dec(&service_info->send_inflight);
+
+		transport_put_service_info(service_info);
+
+		return ERR_PTR(-ENOMEM);
+	}
+
+	transport_put_service_info(service_info);
+
+	return &mbuf->base;
+}
+
+static void transport_free_sent_mbuf(struct vs_transport_axon *transport,
+		struct vs_mbuf_axon *mbuf)
+{
+	kmem_cache_free(mbuf_cache, mbuf);
+}
+
+static void __transport_tx_pool_free(struct vs_axon_tx_pool *pool,
+		dma_addr_t laddr)
+{
+	unsigned index = (laddr - pool->base_laddr) >> pool->alloc_order;
+
+	if (WARN_ON(index >= pool->count)) {
+		printk(KERN_DEBUG "free %#llx base %#llx order %d count %d\n",
+				(long long)laddr, (long long)pool->base_laddr,
+				pool->alloc_order, pool->count);
+		return;
+	}
+
+	clear_bit_unlock(index, pool->alloc_bitmap);
+	transport_axon_put_tx_pool(pool);
+}
+
+static int transport_rx_queue_buffer(struct vs_transport_axon *transport,
+		void *ptr, dma_addr_t laddr);
+
+static void transport_rx_recycle(struct vs_transport_axon *transport,
+		struct vs_mbuf_axon *mbuf)
+{
+	void *data = mbuf_real_base(mbuf);
+	dma_addr_t laddr = mbuf->laddr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&transport->rx_alloc_lock, flags);
+
+	if (transport->rx_alloc_extra) {
+		transport->rx_alloc_extra--;
+		dma_pool_free(transport->rx_pool, data, laddr);
+	} else if (transport_rx_queue_buffer(transport, data, laddr) < 0) {
+		struct vs_axon_rx_freelist_entry *buf = data;
+		buf->laddr = laddr;
+		list_add_tail(&buf->list, &transport->rx_freelist);
+		tasklet_schedule(&transport->rx_tasklet);
+	} else {
+		tasklet_schedule(&transport->rx_tasklet);
+	}
+
+	spin_unlock_irqrestore(&transport->rx_alloc_lock, flags);
+}
+
+static void transport_free_mbuf_pools(struct vs_transport_axon *transport,
+		struct vs_service_device *service,
+		struct vs_mv_service_info *service_info)
+{
+	/*
+	 * Free the TX allocation pool. This will also free any buffer
+	 * memory allocated from the pool, so it is essential that
+	 * this happens only after we have successfully freed all
+	 * mbufs.
+	 *
+	 * Note that the pool will not exist if the core client is reset
+	 * before it receives a startup message.
+	 */
+	if (!IS_ERR_OR_NULL(service_info->tx_pool))
+		transport_axon_put_tx_pool(service_info->tx_pool);
+	service_info->tx_pool = NULL;
+
+	/* Mark the service's preallocated RX buffers as extra. */
+	spin_lock_irq(&transport->rx_alloc_lock);
+	transport->rx_alloc_extra += service_info->rx_allocated;
+	service_info->rx_allocated = 0;
+	spin_unlock_irq(&transport->rx_alloc_lock);
+}
+
+/* Low-level tx or rx buffer free, with no quota tracking */
+static void __transport_free_mbuf(struct vs_transport_axon *transport,
+		struct vs_mbuf_axon *mbuf, bool is_rx)
+{
+	if (is_rx) {
+		transport_rx_recycle(transport, mbuf);
+	} else {
+		__transport_tx_pool_free(mbuf->pool, mbuf->laddr);
+	}
+
+	kmem_cache_free(mbuf_cache, mbuf);
+}
+
+static void transport_free_mbuf(struct vs_transport *_transport,
+		struct vs_service_device *service, struct vs_mbuf *_mbuf)
+{
+	struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+	struct vs_mbuf_axon *mbuf = to_vs_mbuf_axon(_mbuf);
+	struct vs_mv_service_info *service_info = NULL;
+	void *data = mbuf_real_base(mbuf);
+	vs_service_id_t service_id __maybe_unused =
+		transport_get_mbuf_service_id(transport, data, NULL);
+	bool is_recv = mbuf->base.is_recv;
+
+	WARN_ON(!service);
+	service_info = transport_get_service_info(service);
+
+	__transport_free_mbuf(transport, mbuf, is_recv);
+
+	/*
+	 * If this message was left over from a service that has already been
+	 * deleted, we don't need to do any quota accounting.
+	 */
+	if (!service_info)
+		return;
+
+	if (unlikely(atomic_read(&service_info->outstanding_frees))) {
+		if (atomic_dec_and_test(&service_info->outstanding_frees)) {
+			dev_dbg(transport->axon_dev,
+				"service %d all outstanding frees done\n",
+				service->id);
+			transport_free_mbuf_pools(transport, service,
+					service_info);
+			vs_service_enable(service);
+		} else {
+			dev_dbg(transport->axon_dev,
+				"service %d outstanding frees -> %d\n",
+				service->id, atomic_read(
+					&service_info->outstanding_frees));
+		}
+	} else if (is_recv) {
+		smp_mb__before_atomic_dec();
+		atomic_dec(&service_info->recv_inflight);
+		if (atomic_inc_return(&service_info->recv_freed) >=
+				service_info->recv_freed_watermark) {
+			transport->free_bufs_pending = true;
+			schedule_delayed_work(&transport->free_bufs_work, 0);
+		}
+
+		vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+				transport->axon_dev,
+				"Freed recv buffer for service %d rq=%d/%d, freed=%d (watermark = %d)\n",
+				service_id,
+				atomic_read(&service_info->recv_inflight),
+				service_info->service->recv_quota,
+				atomic_read(&service_info->recv_freed),
+				service_info->recv_freed_watermark);
+	} else {
+		/*
+		 * We are freeing a message buffer that we allocated. This
+		 * usually happens on error paths in application drivers if
+		 * we allocated a buffer but failed to send it. In this case
+		 * we need to decrement our own send quota since we didn't
+		 * send anything.
+		 */
+		vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+				transport->axon_dev,
+				"Freeing send buffer for service %d, send quota = %d\n",
+				service_id, atomic_read(&service_info->send_inflight));
+
+		smp_mb__before_atomic_dec();
+		atomic_dec(&service_info->send_alloc);
+
+		/*
+		 * We don't allow the tx_ready handler to run when we are
+		 * freeing an mbuf that we allocated.
+		 */
+		reduce_send_quota(transport, service_info, 1, false);
+	}
+
+	transport_put_service_info(service_info);
+}
+
+static size_t transport_mbuf_size(struct vs_mbuf *_mbuf)
+{
+	struct vs_mbuf_axon *mbuf = to_vs_mbuf_axon(_mbuf);
+
+	return mbuf_real_size(mbuf);
+}
+
+static size_t transport_max_mbuf_size(struct vs_transport *_transport)
+{
+	struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+
+	return transport->msg_size - sizeof(vs_service_id_t);
+}
+
+static int okl4_error_to_errno(okl4_error_t err) {
+	switch (err) {
+	case OKL4_OK:
+		return 0;
+	case OKL4_ERROR_AXON_QUEUE_NOT_MAPPED:
+		/* Axon has been reset locally */
+		return -ECONNRESET;
+	case OKL4_ERROR_AXON_QUEUE_NOT_READY:
+		/* No message buffers in the queue. */
+		return -ENOBUFS;
+	case OKL4_ERROR_AXON_INVALID_OFFSET:
+	case OKL4_ERROR_AXON_AREA_TOO_BIG:
+		/* Buffer address is bad */
+		return -EFAULT;
+	case OKL4_ERROR_AXON_BAD_MESSAGE_SIZE:
+	case OKL4_ERROR_AXON_TRANSFER_LIMIT_EXCEEDED:
+		/* One of the Axon's message size limits has been exceeded */
+		return -EMSGSIZE;
+	default:
+		/* Miscellaneous failure, probably a bad cap */
+		return -EIO;
+	}
+}
+
+static void queue_tx_mbuf(struct vs_mbuf_axon *mbuf, struct vs_transport_axon *priv,
+		vs_service_id_t service_id)
+{
+	list_add_tail(&mbuf->base.queue, &priv->tx_queue);
+}
+
+static void free_tx_mbufs(struct vs_transport_axon *priv)
+{
+	struct vs_mbuf_axon *child, *tmp;
+
+	list_for_each_entry_safe(child, tmp, &priv->tx_queue, base.queue) {
+		list_del(&child->base.queue);
+		__transport_free_mbuf(priv, child, false);
+	}
+}
+
+static int __transport_flush(struct vs_transport_axon *transport)
+{
+	_okl4_sys_axon_trigger_send(transport->tx_cap);
+	return 0;
+}
+
+static int transport_flush(struct vs_transport *_transport,
+		struct vs_service_device *service)
+{
+	struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+
+	return __transport_flush(transport);
+}
+
+/*
+ * Low-level transport message send function.
+ *
+ * The caller must hold the transport->readiness_lock, and is responsible for
+ * freeing the mbuf on successful send (use transport_free_sent_mbuf). The
+ * mbuf should _not_ be freed if this function fails. The Virtual Service
+ * driver is responsible for freeing the mbuf in the failure case.
+ */
+static int __transport_send(struct vs_transport_axon *transport,
+		struct vs_mbuf_axon *mbuf, vs_service_id_t service_id,
+		unsigned long flags)
+{
+	u32 uptr;
+	struct okl4_axon_queue_entry *desc;
+	struct vs_axon_tx_pool *old_pool;
+	dma_addr_t old_laddr;
+
+	lockdep_assert_held(&transport->readiness_lock);
+
+	vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+			transport->axon_dev,
+			"send %zu bytes to service %d\n",
+			mbuf->base.size, service_id);
+	vs_debug_dump_mbuf(transport->session_dev, &mbuf->base);
+
+	uptr = READ_ONCE(transport->tx->queues[0].uptr);
+	desc = &transport->tx_descs[uptr];
+
+	/* Is the descriptor ready to use? */
+	if (okl4_axon_data_info_getpending(&desc->info))
+		return -ENOSPC;
+	mb();
+
+	/* The descriptor is ours; save its old state and increment the uptr */
+	old_pool = transport->tx_pools[uptr];
+	if (old_pool != NULL)
+		old_laddr = okl4_axon_data_info_getladdr(&desc->info);
+	transport->tx_pools[uptr] = mbuf->pool;
+
+	INC_MOD(uptr, transport->tx->queues[0].entries);
+	WRITE_ONCE(transport->tx->queues[0].uptr, uptr);
+
+	/* Set up the descriptor */
+	desc->data_size = mbuf_real_size(mbuf);
+	okl4_axon_data_info_setladdr(&desc->info, mbuf->laddr);
+
+	/* Message is ready to go */
+	wmb();
+	okl4_axon_data_info_setpending(&desc->info, true);
+
+	if (flags & VS_TRANSPORT_SEND_FLAGS_MORE) {
+		/*
+		 * This is a batched message, so we normally don't flush,
+		 * unless we've filled the queue completely.
+		 *
+		 * Races on the queue descriptor don't matter here, because
+		 * this is only an optimisation; the service should do an
+		 * explicit flush when it finishes the batch anyway.
+		 */
+		desc = &transport->tx_descs[uptr];
+		if (okl4_axon_data_info_getpending(&desc->info))
+			__transport_flush(transport);
+	} else {
+		__transport_flush(transport);
+	}
+
+	/* Free any buffer previously in the descriptor */
+	if (old_pool != NULL) {
+		u32 uptr_freed = transport->tx_uptr_freed;
+		INC_MOD(uptr_freed, transport->tx->queues[0].entries);
+		WARN_ON(uptr_freed != uptr);
+		__transport_tx_pool_free(old_pool, old_laddr);
+		transport->tx_uptr_freed = uptr_freed;
+	}
+
+	return 0;
+}
+
+static int transport_send_might_queue(struct vs_transport_axon *transport,
+		struct vs_mbuf_axon *mbuf, vs_service_id_t service_id,
+		unsigned long flags, bool *queued)
+{
+	int ret = 0;
+
+	lockdep_assert_held(&transport->readiness_lock);
+	*queued = false;
+
+	if (transport->readiness != VS_TRANSPORT_ACTIVE)
+		return -ECONNRESET;
+
+	if (!list_empty(&transport->tx_queue)) {
+		*queued = true;
+	} else {
+		ret = __transport_send(transport, mbuf, service_id, flags);
+		if (ret == -ENOSPC) {
+			*queued = true;
+			ret = 0;
+		}
+	}
+
+	if (*queued)
+		queue_tx_mbuf(mbuf, transport, service_id);
+
+	return ret;
+}
+
+static int transport_send(struct vs_transport *_transport,
+		struct vs_service_device *service, struct vs_mbuf *_mbuf,
+		unsigned long flags)
+{
+	struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+	struct vs_mbuf_axon *mbuf = to_vs_mbuf_axon(_mbuf);
+	struct vs_mv_service_info *service_info;
+	vs_service_id_t service_id;
+	int recv_freed, freed_acks;
+	bool queued;
+	int err;
+	unsigned long irqflags;
+
+	if (WARN_ON(!transport || !mbuf || mbuf->owner != transport))
+		return -EINVAL;
+
+	service_id = transport_get_mbuf_service_id(transport,
+			mbuf_real_base(mbuf), NULL);
+
+	if (WARN_ON(service_id != service->id))
+		return -EINVAL;
+
+	service_info = transport_get_service_info(service);
+	if (!service_info)
+		return -EINVAL;
+
+	if (mbuf->base.is_recv) {
+		/*
+		 * This message buffer was allocated for receive. We don't
+		 * allow receive message buffers to be reused for sending
+		 * because it makes our quotas inconsistent.
+		 */
+		dev_err(&service_info->service->dev,
+				"Attempted to send a received message buffer\n");
+		transport_put_service_info(service_info);
+		return -EINVAL;
+	}
+
+	if (!service_info->ready) {
+		transport_put_service_info(service_info);
+		return -ECOMM;
+	}
+
+	/*
+	 * Set the message's service id reserved bits to the number of buffers
+	 * we have freed. We can only ack 2 ^ VS_SERVICE_ID_RESERVED_BITS - 1
+	 * buffers in one message.
+	 */
+	do {
+		recv_freed = atomic_read(&service_info->recv_freed);
+		freed_acks = min_t(int, recv_freed,
+				VS_SERVICE_ID_TRANSPORT_MASK);
+	} while (recv_freed != atomic_cmpxchg(&service_info->recv_freed,
+				recv_freed, recv_freed - freed_acks));
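+	/*
+	 * Any freed buffers beyond that limit stay counted in recv_freed and
+	 * are acknowledged by a later send or by a dedicated free_bufs
+	 * message. For example, if the limit were 15 and 20 buffers had been
+	 * freed, this message would ack 15 and leave 5 in recv_freed.
+	 */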
+
+	service_id = service_info->service->id;
+	vs_set_service_id_reserved_bits(&service_id, freed_acks);
+	*(vs_service_id_t *)mbuf_real_base(mbuf) = service_id;
+
+	spin_lock_irqsave(&transport->readiness_lock, irqflags);
+	err = transport_send_might_queue(transport, mbuf,
+			service_info->service->id, flags, &queued);
+	if (err) {
+		/* We failed to send, so revert the freed acks */
+		if (atomic_add_return(freed_acks,
+				&service_info->recv_freed) >=
+				service_info->recv_freed_watermark) {
+			transport->free_bufs_pending = true;
+			schedule_delayed_work(&transport->free_bufs_work, 0);
+		}
+		transport_put_service_info(service_info);
+		spin_unlock_irqrestore(&transport->readiness_lock, irqflags);
+		return err;
+	}
+
+	atomic_dec(&service_info->send_alloc);
+
+	if (queued) {
+		transport_put_service_info(service_info);
+		spin_unlock_irqrestore(&transport->readiness_lock, irqflags);
+		return 0;
+	}
+
+	/*
+	 * The mbuf was sent successfully. We can free it locally since it is
+	 * now owned by the remote end.
+	 */
+	transport_free_sent_mbuf(transport, mbuf);
+
+	vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+			transport->axon_dev,
+			"Send okay: service %d (0x%.2x) sq=%d/%d, alloc--=%d, rq=%d/%d, freed=%d/%d, bc=%d\n",
+			service_info->service->id, service_id,
+			atomic_read(&service_info->send_inflight),
+			service_info->service->send_quota,
+			atomic_read(&service_info->send_alloc),
+			atomic_read(&service_info->recv_inflight),
+			service_info->service->recv_quota, freed_acks,
+			atomic_read(&service_info->recv_freed),
+			atomic_read(&transport->free_bufs_balance));
+
+	transport_put_service_info(service_info);
+	spin_unlock_irqrestore(&transport->readiness_lock, irqflags);
+
+	return 0;
+}
+
+static void transport_free_bufs_work(struct work_struct *work)
+{
+	struct delayed_work *dwork = to_delayed_work(work);
+	struct vs_transport_axon *transport = container_of(dwork,
+			struct vs_transport_axon, free_bufs_work);
+	struct vs_mbuf_axon *mbuf;
+	int i, err, count = 0, old_balance;
+	bool queued;
+	size_t size;
+	u16 *p;
+
+	/*
+	 * Atomically decide whether to send a message, and increment
+	 * the balance if we are going to.
+	 *
+	 * We don't need barriers before these reads because they're
+	 * implicit in the work scheduling.
+	 */
+	do {
+		old_balance = atomic_read(&transport->free_bufs_balance);
+
+		/*
+		 * We only try to send if the balance is negative,
+		 * or if we have been triggered by going over a
+		 * watermark.
+		 */
+		if (old_balance >= 0 && !transport->free_bufs_pending)
+			return;
+
+		/*
+		 * If we've hit the max balance, we can't send. The
+		 * work will be rescheduled next time the balance is
+		 * decremented, if free_bufs_pending is true.
+		 */
+		if (old_balance >= MAX_BALANCE)
+			return;
+
+	} while (old_balance != atomic_cmpxchg(&transport->free_bufs_balance,
+			old_balance, old_balance + 1));
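+	/*
+	 * The balance now accounts for the message we intend to send; the
+	 * paths below that decide not to send (allocation failure, or nothing
+	 * to acknowledge) decrement it again.
+	 */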
+
+	/* Try to allocate a message buffer. */
+	mbuf = __transport_alloc_mbuf(transport, MSG_SEND_FREE_BUFS,
+			transport->free_bufs_pool,
+			transport->msg_size - sizeof(vs_service_id_t),
+			GFP_KERNEL | __GFP_NOWARN);
+	if (!mbuf) {
+		/* Out of memory at the moment; retry later. */
+		atomic_dec(&transport->free_bufs_balance);
+		schedule_delayed_work(dwork, FREE_BUFS_RETRY_DELAY);
+		return;
+	}
+
+	/*
+	 * Clear free_bufs_pending, because we are going to try to send.  We
+	 * need a write barrier afterwards to guarantee that this write is
+	 * ordered before any writes to the recv_freed counts, and therefore
+	 * before any remote free_bufs_pending = true when a service goes
+	 * over its watermark right after we inspect it.
+	 *
+	 * The matching barrier is implicit in the atomic_inc_return in
+	 * transport_free_mbuf().
+	 */
+	transport->free_bufs_pending = false;
+	smp_wmb();
+
+	/*
+	 * Fill in the buffer. Message format is:
+	 *
+	 *   u16: Number of services
+	 *
+	 *   For each service:
+	 *       u16: Service ID
+	 *       u16: Number of freed buffers
+	 */
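+	/*
+	 * For example, a message acknowledging 3 buffers for service 5 and 1
+	 * buffer for service 9 is the five u16 words {2, 5, 3, 9, 1}.
+	 */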
+	p = mbuf->base.data;
+	*(p++) = 0;
+
+	for_each_set_bit(i, transport->service_bitmap,
+			VS_SERVICE_ID_BITMAP_BITS) {
+		struct vs_mv_service_info *service_info;
+		int recv_freed;
+		u16 freed_acks;
+
+		service_info = transport_get_service_id_info(transport, i);
+		if (!service_info)
+			continue;
+
+		/*
+		 * Don't let the message exceed the maximum size for the
+		 * transport.
+		 */
+		size = sizeof(vs_service_id_t) + sizeof(u16) +
+				(count * (2 * sizeof(u16)));
+		if (size > transport->msg_size) {
+			/* FIXME: Jira ticket SDK-3131 - ryanm. */
+			transport_put_service_info(service_info);
+			transport->free_bufs_pending = true;
+			break;
+		}
+
+		/*
+		 * We decrement each service's quota immediately by up to
+		 * USHRT_MAX. If we subsequently fail to send the message then
+		 * we return the count to what it was previously.
+		 */
+		do {
+			recv_freed = atomic_read(&service_info->recv_freed);
+			freed_acks = min_t(int, USHRT_MAX, recv_freed);
+		} while (recv_freed != atomic_cmpxchg(
+				&service_info->recv_freed,
+				recv_freed, recv_freed - freed_acks));
+
+		if (freed_acks) {
+			if (freed_acks < recv_freed)
+				transport->free_bufs_pending = true;
+
+			*(p++) = service_info->service->id;
+			*(p++) = freed_acks;
+			count++;
+
+			vs_dev_debug(VS_DEBUG_TRANSPORT,
+					transport->session_dev,
+					transport->axon_dev,
+					"  [%.2d] Freed %.2d buffers\n",
+					service_info->service->id,
+					freed_acks);
+		} else {
+			vs_dev_debug(VS_DEBUG_TRANSPORT,
+					transport->session_dev,
+					transport->axon_dev,
+					"  [%.2d] No buffers to free\n",
+					service_info->service->id);
+		}
+
+		transport_put_service_info(service_info);
+	}
+
+	if (transport->free_bufs_pending)
+		schedule_delayed_work(dwork, 0);
+
+	if (count == 0 && old_balance >= 0) {
+		/*
+		 * We are sending a new free bufs message, but we have no
+		 * freed buffers to tell the other end about. We don't send
+		 * an empty message unless the pre-increment balance was
+		 * negative (in which case we need to ack a remote free_bufs).
+		 *
+		 * Note that nobody else can increase the balance, so we only
+		 * need to check for a non-negative balance once before
+		 * decrementing. However, if the incoming free-bufs handler
+		 * concurrently decrements, the balance may become negative,
+		 * in which case we reschedule ourselves immediately to send
+		 * the ack.
+		 */
+		if (atomic_dec_return(&transport->free_bufs_balance) < 0)
+			schedule_delayed_work(dwork, 0);
+
+		__transport_free_mbuf(transport, mbuf, false);
+
+		vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+				transport->axon_dev,
+				"No services had buffers to free\n");
+
+		return;
+	}
+
+	vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+			transport->axon_dev,
+			"Sending free bufs message for %d services\n", count);
+
+	/* Fix up the message size */
+	p = mbuf->base.data;
+	*p = count;
+	mbuf->base.size = sizeof(u16) * ((count * 2) + 1);
+
+	spin_lock_irq(&transport->readiness_lock);
+	err = transport_send_might_queue(transport, mbuf, MSG_SEND_FREE_BUFS,
+			0, &queued);
+	if (err) {
+		spin_unlock_irq(&transport->readiness_lock);
+		goto fail;
+	}
+
+	/* FIXME: Jira ticket SDK-4675 - ryanm. */
+	if (!queued) {
+		/*
+		 * The mbuf was sent successfully. We can free it locally
+		 * since it is now owned by the remote end.
+		 */
+		transport_free_sent_mbuf(transport, mbuf);
+	}
+	spin_unlock_irq(&transport->readiness_lock);
+
+	return;
+
+fail:
+	dev_err(transport->axon_dev,
+			"Failed to send free bufs message: %d\n", err);
+	transport_fatal_error(transport, "free bufs send failed");
+}
+
+int transport_notify(struct vs_transport *_transport,
+		struct vs_service_device *service, unsigned long bits)
+{
+	struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+	unsigned long bit_offset, bitmask, word;
+	int highest_set_bit, spilled_bits;
+
+	BUG_ON(!transport);
+
+	if (!bits)
+		return -EINVAL;
+
+	/* Check that the service isn't trying to raise bits it doesn't own */
+	if (bits & ~((1UL << service->notify_send_bits) - 1))
+		return -EINVAL;
+
+	bit_offset = service->notify_send_offset;
+	word = BIT_WORD(bit_offset);
+	bitmask = bits << (bit_offset % BITS_PER_LONG);
+
+	vs_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+			"Sending notification %ld to service id %d\n", bitmask,
+			service->id);
+
+	_okl4_sys_vinterrupt_raise(transport->notify_cap[word], bitmask);
+
+	/*
+	 * The bit range may spill into the next virqline.
+	 *
+	 * Check by adding the bit offset to the index of the highest set bit
+	 * in the requested bitmask. If we would need to raise a bit beyond
+	 * the last bit of the word (BITS_PER_LONG - 1), we have spilled into
+	 * the next word and need to raise that too.
+	 */
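+	/*
+	 * For example, with BITS_PER_LONG == 64, an offset of 62 and bits ==
+	 * 0x7, bits 62-63 were raised on the virqline above and the spill
+	 * handling below raises bit 0 on the next virqline.
+	 */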
+	highest_set_bit = find_last_bit(&bits, BITS_PER_LONG);
+	spilled_bits = highest_set_bit + (bit_offset % BITS_PER_LONG) -
+			(BITS_PER_LONG - 1);
+	if (spilled_bits > 0) {
+		/*
+		 * Calculate the new bitmask for the spilled bits by shifting
+		 * the requested bits to the right. The shift count is
+		 * determined by where the first spilled bit is.
+		 */
+		int first_spilled_bit = highest_set_bit - spilled_bits + 1;
+
+		bitmask = bits >> first_spilled_bit;
+
+		vs_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+				"Sending notification %ld to service id %d\n", bitmask,
+				service->id);
+
+		_okl4_sys_vinterrupt_raise(transport->notify_cap[word + 1], bitmask);
+	}
+
+	return 0;
+}
+
+static void
+transport_handle_free_bufs_message(struct vs_transport_axon *transport,
+		struct vs_mbuf_axon *mbuf)
+{
+	struct vs_mv_service_info *service_info;
+	vs_service_id_t service_id;
+	u16 *p = mbuf->base.data;
+	int i, count, freed_acks, new_balance;
+
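+	/*
+	 * The message layout matches the one built by
+	 * transport_free_bufs_work(): a u16 service count followed by a
+	 * (service id, freed count) pair of u16s for each service.
+	 */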
+	count = *(p++);
+	vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+			transport->axon_dev,
+			"Free bufs message received for %d services\n", count);
+	for (i = 0; i < count; i++) {
+		int old_quota __maybe_unused;
+
+		service_id = *(p++);
+		freed_acks = *(p++);
+
+		vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+				transport->axon_dev, "  [%.2d] %.4d\n",
+				service_id, freed_acks);
+
+		service_info = transport_get_service_id_info(transport,
+				service_id);
+		if (!service_info) {
+			vs_dev_debug(VS_DEBUG_TRANSPORT,
+					transport->session_dev,
+					transport->axon_dev,
+					"Got %d free_acks for unknown service %d\n",
+					freed_acks, service_id);
+			continue;
+		}
+
+		old_quota = atomic_read(&service_info->send_inflight);
+		freed_acks = reduce_send_quota(transport, service_info,
+				freed_acks, service_info->ready);
+		vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+				transport->axon_dev,
+				"  [%.2d] Freed %.2d buffers (%d -> %d, quota = %d)\n",
+				service_id, freed_acks, old_quota,
+				atomic_read(&service_info->send_inflight),
+				service_info->service->send_quota);
+
+		transport_put_service_info(service_info);
+	}
+
+	__transport_free_mbuf(transport, mbuf, true);
+
+	new_balance = atomic_dec_return(&transport->free_bufs_balance);
+	if (new_balance < -MAX_BALANCE) {
+		dev_err(transport->axon_dev,
+				"Balance counter fell below -MAX_BALANCE (%d < %d)\n",
+				atomic_read(&transport->free_bufs_balance),
+				-MAX_BALANCE);
+		transport_fatal_error(transport, "balance counter underrun");
+		return;
+	}
+
+	/* Check if we need to send a freed buffers message back */
+	if (new_balance < 0 || transport->free_bufs_pending)
+		schedule_delayed_work(&transport->free_bufs_work, 0);
+}
+
+static int transport_rx_queue_buffer(struct vs_transport_axon *transport,
+		void *ptr, dma_addr_t laddr)
+{
+	struct okl4_axon_queue_entry *desc;
+	okl4_axon_data_info_t info;
+
+	/* Select the buffer desc to reallocate */
+	desc = &transport->rx_descs[transport->rx_uptr_allocated];
+	info = READ_ONCE(desc->info);
+
+	/* If there is no space in the rx queue, fail */
+	if (okl4_axon_data_info_getusr(&info))
+		return -ENOSPC;
+
+	/* Don't update desc before reading the clear usr bit */
+	smp_mb();
+
+	/* Update the buffer pointer in the desc and mark it valid. */
+	transport->rx_ptrs[transport->rx_uptr_allocated] = ptr;
+	okl4_axon_data_info_setladdr(&info, (okl4_laddr_t)laddr);
+	okl4_axon_data_info_setpending(&info, true);
+	okl4_axon_data_info_setusr(&info, true);
+	mb();
+	WRITE_ONCE(desc->info, info);
+
+	/* Proceed to the next buffer */
+	INC_MOD(transport->rx_uptr_allocated,
+			transport->rx->queues[0].entries);
+
+	/* Return true if the next desc has no buffer yet */
+	desc = &transport->rx_descs[transport->rx_uptr_allocated];
+	return !okl4_axon_data_info_getusr(&desc->info);
+}
+
+/* TODO: multiple queue support / small message prioritisation */
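+/*
+ * Process a single incoming message, if one is available.
+ *
+ * Returns 1 if a message was consumed, 0 if there is nothing more to do for
+ * now (the next buffer has not been filled yet, or the message was dropped
+ * because of a reset), and a negative errno on failure, e.g. -ENOBUFS when
+ * the RX queue has no buffers left or -ENOMEM when an mbuf cannot be
+ * allocated.
+ */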
+static int transport_process_msg(struct vs_transport_axon *transport)
+{
+	struct vs_mv_service_info *service_info;
+	struct vs_mbuf_axon *mbuf;
+	vs_service_id_t service_id;
+	unsigned freed_acks;
+	u32 uptr;
+	struct okl4_axon_queue_entry *desc;
+	void **ptr;
+	okl4_axon_data_info_t info;
+
+	/* Select the descriptor to receive from */
+	uptr = READ_ONCE(transport->rx->queues[0].uptr);
+	desc = &transport->rx_descs[uptr];
+	ptr = &transport->rx_ptrs[uptr];
+	info = READ_ONCE(desc->info);
+
+	/* Have we emptied the whole queue? */
+	if (!okl4_axon_data_info_getusr(&info))
+		return -ENOBUFS;
+
+	/* Has the next buffer been filled yet? */
+	if (okl4_axon_data_info_getpending(&info))
+		return 0;
+
+	/* Don't read the buffer or desc before seeing a cleared pending bit */
+	rmb();
+
+	/* Is the message too small to be valid? */
+	if (desc->data_size < sizeof(vs_service_id_t))
+		return -EBADMSG;
+
+	/* Allocate and set up the mbuf */
+	mbuf = kmem_cache_alloc(mbuf_cache, GFP_ATOMIC);
+	if (!mbuf)
+		return -ENOMEM;
+
+	mbuf->owner = transport;
+	mbuf->laddr = okl4_axon_data_info_getladdr(&info);
+	mbuf->pool = NULL;
+	mbuf->base.is_recv = true;
+	mbuf->base.data = *ptr + sizeof(vs_service_id_t);
+	mbuf->base.size = desc->data_size - sizeof(vs_service_id_t);
+
+	INC_MOD(uptr, transport->rx->queues[0].entries);
+	WRITE_ONCE(transport->rx->queues[0].uptr, uptr);
+
+	/* Finish reading desc before clearing usr bit */
+	smp_mb();
+
+	/* Re-check the pending bit, in case we've just been reset */
+	info = READ_ONCE(desc->info);
+	if (unlikely(okl4_axon_data_info_getpending(&info))) {
+		kmem_cache_free(mbuf_cache, mbuf);
+		return 0;
+	}
+
+	/* Clear usr bit; after this point the buffer is owned by the mbuf */
+	okl4_axon_data_info_setusr(&info, false);
+	WRITE_ONCE(desc->info, info);
+
+	/* Determine who to deliver the mbuf to */
+	service_id = transport_get_mbuf_service_id(transport,
+			mbuf_real_base(mbuf), &freed_acks);
+
+	if (service_id == MSG_SEND_FREE_BUFS) {
+		transport_handle_free_bufs_message(transport, mbuf);
+		return 1;
+	}
+
+	service_info = transport_get_service_id_info(transport, service_id);
+	if (!service_info) {
+		vs_dev_debug(VS_DEBUG_TRANSPORT,
+				transport->session_dev, transport->axon_dev,
+				"discarding message for missing service %d\n",
+				service_id);
+		__transport_free_mbuf(transport, mbuf, true);
+		return -EIDRM;
+	}
+
+	/*
+	 * If the remote end has freed some buffers that we sent it, then we
+	 * can decrement our send quota count by that amount.
+	 */
+	freed_acks = reduce_send_quota(transport, service_info,
+			freed_acks, service_info->ready);
+
+	/* If the service has been reset, drop the message. */
+	if (!service_info->ready) {
+		vs_dev_debug(VS_DEBUG_TRANSPORT,
+				transport->session_dev, transport->axon_dev,
+				"discarding message for reset service %d\n",
+				service_id);
+
+		__transport_free_mbuf(transport, mbuf, true);
+		transport_put_service_info(service_info);
+
+		return 1;
+	}
+
+	/*
+	 * Increment our recv quota since we are now holding a buffer. We
+	 * will decrement it when the buffer is freed in transport_free_mbuf.
+	 */
+	if (!atomic_add_unless(&service_info->recv_inflight, 1,
+				service_info->service->recv_quota)) {
+		/*
+		 * Going over the recv_quota indicates that something bad
+		 * has happened because either the other end has exceeded
+		 * its send quota or the two ends have a disagreement about
+		 * what the quota is.
+		 *
+		 * We free the buffer and reset the transport.
+		 */
+		dev_err(transport->axon_dev,
+				"Service %d is at max receive quota %d - resetting\n",
+				service_info->service->id,
+				service_info->service->recv_quota);
+
+		transport_fatal_error(transport, "rx quota exceeded");
+
+		__transport_free_mbuf(transport, mbuf, true);
+		transport_put_service_info(service_info);
+
+		return 0;
+	}
+
+	WARN_ON(atomic_read(&service_info->recv_inflight) >
+			service_info->service->recv_quota);
+
+	vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+			transport->axon_dev,
+			"receive %zu bytes from service 0x%.2x (%d): sq=%d/%d, rq=%d/%d, freed_acks=%d, freed=%d/%d bc=%d\n",
+			mbuf->base.size, service_info->service->id, service_id,
+			atomic_read(&service_info->send_inflight),
+			service_info->service->send_quota,
+			atomic_read(&service_info->recv_inflight),
+			service_info->service->recv_quota, freed_acks,
+			atomic_read(&service_info->recv_freed),
+			service_info->recv_freed_watermark,
+			atomic_read(&transport->free_bufs_balance));
+	vs_debug_dump_mbuf(transport->session_dev, &mbuf->base);
+
+	if (vs_session_handle_message(transport->session_dev, &mbuf->base,
+			service_id) < 0)
+		transport_free_mbuf(&transport->transport,
+				service_info->service, &mbuf->base);
+
+	transport_put_service_info(service_info);
+
+	return 1;
+}
+
+static void transport_flush_tx_queues(struct vs_transport_axon *transport)
+{
+	okl4_error_t err;
+	int i;
+
+	lockdep_assert_held(&transport->readiness_lock);
+
+	/* Release any queued mbufs */
+	free_tx_mbufs(transport);
+
+	/*
+	 * Re-attach the TX Axon's segment, which implicitly invalidates
+	 * the queues and stops any outgoing message transfers. The queues
+	 * will be reconfigured when the transport becomes ready again.
+	 */
+	err = _okl4_sys_axon_set_send_segment(transport->tx_cap,
+			transport->segment, transport->segment_base);
+	if (err != OKL4_OK) {
+		dev_err(transport->axon_dev, "TX reattach failed: %d\n",
+				(int)err);
+	}
+
+	/*
+	 * The TX Axon has stopped, so we can safely clear the pending
+	 * bit and free the buffer for any outgoing messages, and reset uptr
+	 * and kptr to 0.
+	 */
+	for (i = 0; i < transport->tx->queues[0].entries; i++) {
+		if (!transport->tx_pools[i])
+			continue;
+
+		okl4_axon_data_info_setpending(
+				&transport->tx_descs[i].info, false);
+		__transport_tx_pool_free(transport->tx_pools[i],
+				okl4_axon_data_info_getladdr(
+					&transport->tx_descs[i].info));
+		transport->tx_pools[i] = NULL;
+	}
+	transport->tx->queues[0].uptr = 0;
+	transport->tx->queues[0].kptr = 0;
+	transport->tx_uptr_freed = 0;
+}
+
+static void transport_flush_rx_queues(struct vs_transport_axon *transport)
+{
+	okl4_error_t err;
+	int i;
+
+	lockdep_assert_held(&transport->readiness_lock);
+
+	/*
+	 * Re-attach the RX Axon's segment, which implicitly invalidates
+	 * the queues and stops any incoming message transfers, though those
+	 * should already have been cancelled at the sending end. The queues
+	 * will be reconfigured when the transport becomes ready again.
+	 */
+	err = _okl4_sys_axon_set_recv_segment(transport->rx_cap,
+			transport->segment, transport->segment_base);
+	if (err != OKL4_OK) {
+		dev_err(transport->axon_dev, "RX reattach failed: %d\n",
+				(int)err);
+	}
+
+	/*
+	 * The RX Axon has stopped, so we can reset the pending bit on all
+	 * allocated message buffers to prepare them for reuse when the reset
+	 * completes.
+	 */
+	for (i = 0; i < transport->rx->queues[0].entries; i++) {
+		if (okl4_axon_data_info_getusr(&transport->rx_descs[i].info))
+			okl4_axon_data_info_setpending(
+					&transport->rx_descs[i].info, true);
+	}
+
+	/*
+	 * Reset kptr to the current uptr.
+	 *
+	 * We use a barrier here to ensure the pending bits are reset before
+	 * reading uptr, matching the barrier in transport_process_msg between
+	 * the uptr update and the second check of the pending bit. This means
+	 * that races with transport_process_msg() will end in one of two
+	 * ways:
+	 *
+	 * 1. transport_process_msg() updates uptr before this barrier, so the
+	 *    RX buffer is passed up to the session layer to be rejected there
+	 *    and recycled; or
+	 *
+	 * 2. the reset pending bit is seen by the second check in
+	 *    transport_process_msg(), which knows that it is being reset and
+	 *    can drop the message before it claims the buffer.
+	 */
+	smp_mb();
+	transport->rx->queues[0].kptr =
+		READ_ONCE(transport->rx->queues[0].uptr);
+
+	/*
+	 * Cancel any pending freed bufs work. We can't flush it here, but
+	 * that is OK: we will do so before we become ready.
+	 */
+	cancel_delayed_work(&transport->free_bufs_work);
+}
+
+static bool transport_axon_reset(struct vs_transport_axon *transport)
+{
+	okl4_error_t err;
+	unsigned long flags;
+	bool reset_complete = false;
+
+	spin_lock_irqsave(&transport->readiness_lock, flags);
+
+	/*
+	 * Reset the transport, dumping any messages in transit, and tell the
+	 * remote end that it should do the same.
+	 *
+	 * We only do this if the transport is not already marked reset. Doing
+	 * otherwise would be redundant.
+	 */
+	if ((transport->readiness != VS_TRANSPORT_RESET) &&
+			transport->readiness != VS_TRANSPORT_LOCAL_RESET &&
+			transport->readiness != VS_TRANSPORT_REMOTE_READY) {
+		/*
+		 * Flush the Axons' TX queues. We can't flush the RX queues
+		 * until after the remote end has acknowledged the reset.
+		 */
+		transport_flush_tx_queues(transport);
+
+		/*
+		 * Raise a reset request VIRQ, and discard any incoming reset
+		 * or ready notifications as they are now stale. Note that we
+		 * must do this in a single syscall.
+		 */
+		err = _okl4_sys_vinterrupt_clear_and_raise(
+				transport->reset_okl4_irq,
+				transport->reset_cap, 0UL,
+				VS_TRANSPORT_VIRQ_RESET_REQ).error;
+		if (err != OKL4_OK) {
+			dev_err(transport->axon_dev, "Reset raise failed: %d\n",
+					(int)err);
+		}
+
+		/* Local reset is complete */
+		if (transport->readiness != VS_TRANSPORT_SHUTDOWN)
+			transport->readiness = VS_TRANSPORT_LOCAL_RESET;
+	} else {
+		/* Already in reset */
+		reset_complete = true;
+	}
+
+	spin_unlock_irqrestore(&transport->readiness_lock, flags);
+
+	return reset_complete;
+}
+
+static void transport_reset(struct vs_transport *_transport)
+{
+	struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+
+	vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+			transport->axon_dev, "reset\n");
+
+	if (transport_axon_reset(transport)) {
+		vs_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+				"reset while already reset (no-op)\n");
+
+		vs_session_handle_reset(transport->session_dev);
+	}
+}
+
+static void transport_ready(struct vs_transport *_transport)
+{
+	struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+	okl4_error_t err;
+
+	vs_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+			"%s: becoming ready\n", __func__);
+
+	/*
+	 * Make sure any previously scheduled freed bufs work is cancelled.
+	 * It should not be possible for this to be rescheduled later, as long
+	 * as the transport is in reset.
+	 */
+	cancel_delayed_work_sync(&transport->free_bufs_work);
+	spin_lock_irq(&transport->readiness_lock);
+
+	atomic_set(&transport->free_bufs_balance, 0);
+	transport->free_bufs_pending = false;
+
+	switch (transport->readiness) {
+	case VS_TRANSPORT_RESET:
+		transport->readiness = VS_TRANSPORT_LOCAL_READY;
+		break;
+	case VS_TRANSPORT_REMOTE_READY:
+		vs_session_handle_activate(transport->session_dev);
+		transport->readiness = VS_TRANSPORT_ACTIVE;
+		break;
+	case VS_TRANSPORT_LOCAL_RESET:
+		/*
+		 * Session layer is confused; usually due to the reset at init
+		 * time, which it did not explicitly request, not having
+		 * completed yet. We just ignore it and wait for the reset. We
+		 * could avoid this by not starting the session until the
+		 * startup reset completes.
+		 */
+		spin_unlock_irq(&transport->readiness_lock);
+		return;
+	case VS_TRANSPORT_SHUTDOWN:
+		/* Do nothing. */
+		spin_unlock_irq(&transport->readiness_lock);
+		return;
+	default:
+		/* Session layer is broken */
+		WARN(1, "transport_ready() called in the wrong state: %d",
+				transport->readiness);
+		goto fail;
+	}
+
+	/* Raise a ready notification VIRQ. */
+	err = _okl4_sys_vinterrupt_raise(transport->reset_cap,
+			VS_TRANSPORT_VIRQ_READY);
+	if (err != OKL4_OK) {
+		dev_err(transport->axon_dev, "Ready raise failed: %d\n",
+				(int)err);
+		goto fail;
+	}
+
+	/*
+	 * Set up the Axons' queue pointers.
+	 */
+	err = _okl4_sys_axon_set_send_area(transport->tx_cap,
+			transport->tx_phys, transport->tx_size);
+	if (err != OKL4_OK) {
+		dev_err(transport->axon_dev, "TX set area failed: %d\n",
+				(int)err);
+		goto fail;
+	}
+
+	err = _okl4_sys_axon_set_send_queue(transport->tx_cap,
+			transport->tx_phys);
+	if (err != OKL4_OK) {
+		dev_err(transport->axon_dev, "TX set queue failed: %d\n",
+				(int)err);
+		goto fail;
+	}
+
+	err = _okl4_sys_axon_set_recv_area(transport->rx_cap,
+			transport->rx_phys, transport->rx_size);
+	if (err != OKL4_OK) {
+		dev_err(transport->axon_dev, "RX set area failed: %d\n",
+				(int)err);
+		goto fail;
+	}
+
+	err = _okl4_sys_axon_set_recv_queue(transport->rx_cap,
+			transport->rx_phys);
+	if (err != OKL4_OK) {
+		dev_err(transport->axon_dev, "RX set queue failed: %d\n",
+				(int)err);
+		goto fail;
+	}
+
+	spin_unlock_irq(&transport->readiness_lock);
+	return;
+
+fail:
+	spin_unlock_irq(&transport->readiness_lock);
+
+	transport_axon_reset(transport);
+}
+
+static int transport_service_add(struct vs_transport *_transport,
+		struct vs_service_device *service)
+{
+	struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+	struct vs_mv_service_info *service_info;
+
+	/*
+	 * We can't print out the core service add because the session
+	 * isn't fully registered at that time.
+	 */
+	if (service->id != 0)
+		vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+				transport->axon_dev,
+				"Add service - id = %d\n", service->id);
+
+	service_info = kzalloc(sizeof(*service_info), GFP_KERNEL);
+	if (!service_info)
+		return -ENOMEM;
+
+	kref_init(&service_info->kref);
+
+	/* Matching vs_put_service() is in transport_info_free */
+	service_info->service = vs_get_service(service);
+
+	/* Make the service_info visible */
+	rcu_assign_pointer(service->transport_priv, service_info);
+
+	__set_bit(service->id, transport->service_bitmap);
+
+	return 0;
+}
+
+static void transport_service_remove(struct vs_transport *_transport,
+		struct vs_service_device *service)
+{
+	struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+	struct vs_mv_service_info *service_info;
+
+	vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+			transport->axon_dev, "Remove service - id = %d\n",
+			service->id);
+
+	__clear_bit(service->id, transport->service_bitmap);
+
+	service_info = service->transport_priv;
+	rcu_assign_pointer(service->transport_priv, NULL);
+
+	if (service_info->ready) {
+		dev_err(transport->axon_dev,
+				"Removing service %d while ready\n",
+				service->id);
+		transport_fatal_error(transport, "removing ready service");
+	}
+
+	transport_put_service_info(service_info);
+}
+
+static struct vs_axon_tx_pool *
+transport_axon_init_tx_pool(struct vs_transport_axon *transport,
+		size_t msg_size, unsigned send_quota)
+{
+	struct vs_axon_tx_pool *pool;
+
+	pool = devm_kzalloc(transport->axon_dev, sizeof(*pool) +
+			(sizeof(unsigned long) * BITS_TO_LONGS(send_quota)),
+			GFP_KERNEL);
+	if (!pool)
+		return ERR_PTR(-ENOMEM);
+
+	pool->transport = transport;
+	pool->alloc_order = ilog2(msg_size + sizeof(vs_service_id_t));
+	pool->count = send_quota;
+
+	pool->base_vaddr = dmam_alloc_coherent(transport->axon_dev,
+			send_quota << pool->alloc_order, &pool->base_laddr,
+			GFP_KERNEL);
+	if (!pool->base_vaddr) {
+		dev_err(transport->axon_dev, "Couldn't allocate %lu times %zu bytes for TX\n",
+				(unsigned long)pool->count, (size_t)1 << pool->alloc_order);
+		devm_kfree(transport->axon_dev, pool);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	kref_init(&pool->kref);
+	return pool;
+}
+
+static int transport_service_start(struct vs_transport *_transport,
+		struct vs_service_device *service)
+{
+	struct vs_mv_service_info *service_info;
+	struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+	struct vs_notify_info *info;
+	int i, ret;
+	bool enable_rx;
+
+	vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+			transport->axon_dev, "Start service - id = %d\n",
+			service->id);
+
+	service_info = service->transport_priv;
+	__transport_get_service_info(service_info);
+
+	/* We shouldn't have any mbufs left from before the last reset. */
+	if (WARN_ON(atomic_read(&service_info->outstanding_frees))) {
+		transport_put_service_info(service_info);
+		return -EBUSY;
+	}
+
+	/*
+	 * The watermark is set to half of the received-message quota, rounded
+	 * up. This is fairly arbitrary. Rounding up ensures that we don't set
+	 * it to 0 for services with a quota of 1 (and thus trigger infinite
+	 * free_bufs messages).
+	 */
+	service_info->recv_freed_watermark = (service->recv_quota + 1) / 2;
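+	/*
+	 * For example, a recv_quota of 1 gives a watermark of 1, and a
+	 * recv_quota of 4 gives a watermark of 2.
+	 */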
+
+	if (WARN_ON(service->notify_recv_bits + service->notify_recv_offset >
+				transport->notify_rx_nirqs * BITS_PER_LONG)) {
+		transport_put_service_info(service_info);
+		return -EINVAL;
+	}
+
+	if (WARN_ON(service->notify_send_bits + service->notify_send_offset >
+				transport->notify_tx_nirqs * BITS_PER_LONG)) {
+		transport_put_service_info(service_info);
+		return -EINVAL;
+	}
+
+	/* This is called twice for the core client only. */
+	WARN_ON(service->id != 0 && service_info->ready);
+
+	if (!service_info->ready) {
+		WARN_ON(atomic_read(&service_info->send_alloc));
+		WARN_ON(atomic_read(&service_info->recv_freed));
+		WARN_ON(atomic_read(&service_info->recv_inflight));
+	}
+
+	/* Create the TX buffer pool. */
+	WARN_ON(service->send_quota && service_info->tx_pool);
+	if (service->send_quota) {
+		service_info->tx_pool = transport_axon_init_tx_pool(transport,
+				transport->msg_size, service->send_quota);
+		if (IS_ERR(service_info->tx_pool)) {
+			ret = PTR_ERR(service_info->tx_pool);
+			service_info->tx_pool = NULL;
+			transport_put_service_info(service_info);
+			return ret;
+		}
+	}
+
+	/* Preallocate some RX buffers, if necessary. */
+	spin_lock_irq(&transport->rx_alloc_lock);
+	i = min(transport->rx_alloc_extra,
+			service->recv_quota - service_info->rx_allocated);
+	transport->rx_alloc_extra -= i;
+	service_info->rx_allocated += i;
+	spin_unlock_irq(&transport->rx_alloc_lock);
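+	/*
+	 * Spare buffers already accounted in rx_alloc_extra are claimed
+	 * first; anything still short of recv_quota is allocated from the
+	 * DMA pool below.
+	 */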
+
+	for (; service_info->rx_allocated < service->recv_quota;
+			service_info->rx_allocated++) {
+		dma_addr_t laddr;
+		struct vs_axon_rx_freelist_entry *buf =
+			dma_pool_alloc(transport->rx_pool, GFP_KERNEL, &laddr);
+		if (WARN_ON(!buf))
+			break;
+		buf->laddr = laddr;
+
+		spin_lock_irq(&transport->rx_alloc_lock);
+		list_add(&buf->list, &transport->rx_freelist);
+		spin_unlock_irq(&transport->rx_alloc_lock);
+	}
+
+	for (i = 0; i < service->notify_recv_bits; i++) {
+		unsigned bit = i + service->notify_recv_offset;
+		info = &transport->transport.notify_info[bit];
+
+		info->service_id = service->id;
+		info->offset = service->notify_recv_offset;
+	}
+
+	atomic_set(&service_info->send_inflight, 0);
+
+	/*
+	 * If this is the core service and it wasn't ready before, we need to
+	 * enable RX for the whole transport.
+	 */
+	enable_rx = service->id == 0 && !service_info->ready;
+
+	service_info->ready = true;
+
+	/* We're now ready to receive. */
+	if (enable_rx)
+		tasklet_enable(&transport->rx_tasklet);
+
+	transport_put_service_info(service_info);
+
+	return 0;
+}
+
+static int transport_service_reset(struct vs_transport *_transport,
+		struct vs_service_device *service)
+{
+	struct vs_mv_service_info *service_info;
+	struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+	struct vs_mbuf_axon *child, *tmp;
+	int ret = 0, service_id, send_remaining, recv_remaining;
+
+	vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+			transport->axon_dev, "Reset service - id = %d\n",
+			service->id);
+
+	service_info = service->transport_priv;
+	__transport_get_service_info(service_info);
+
+	/*
+	 * Clear the ready bit with the tasklet disabled. After this point,
+	 * incoming messages will be discarded by transport_process_msg()
+	 * without incrementing recv_inflight, so we won't spuriously see
+	 * nonzero recv_inflight values for messages that would be discarded
+	 * in the session layer.
+	 */
+	tasklet_disable(&transport->rx_tasklet);
+	service_info->ready = false;
+	if (service->id)
+		tasklet_enable(&transport->rx_tasklet);
+
+	/*
+	 * Cancel and free all pending outgoing messages for the service being
+	 * reset; i.e. those that have been sent by the service but are not
+	 * yet in the axon queue.
+	 *
+	 * Note that this does not clean out the axon queue; messages there
+	 * are already visible to OKL4 and may be transferred at any time,
+	 * so we treat those as already sent.
+	 */
+	spin_lock_irq(&transport->readiness_lock);
+	list_for_each_entry_safe(child, tmp, &transport->tx_queue, base.queue) {
+		service_id = transport_get_mbuf_service_id(transport,
+				mbuf_real_base(child), NULL);
+		if (service_id == service->id) {
+			list_del(&child->base.queue);
+			__transport_tx_pool_free(child->pool, child->laddr);
+		}
+	}
+	spin_unlock_irq(&transport->readiness_lock);
+
+	/*
+	 * If any buffers remain allocated, we mark them as outstanding frees.
+	 * The transport will remain disabled until this count goes to zero.
+	 */
+	send_remaining = atomic_read(&service_info->send_alloc);
+	recv_remaining = atomic_read(&service_info->recv_inflight);
+	ret = atomic_add_return(send_remaining + recv_remaining,
+			&service_info->outstanding_frees);
+	dev_dbg(transport->axon_dev, "reset service %d with %d outstanding (send %d, recv %d)\n",
+			service->id, ret, send_remaining, recv_remaining);
+
+	/*
+	 * Reduce the send alloc count to 0, accounting for races with frees,
+	 * which might have reduced either the alloc count or the outstanding
+	 * count.
+	 */
+	while (send_remaining > 0) {
+		unsigned new_send_remaining = atomic_cmpxchg(
+				&service_info->send_alloc, send_remaining, 0);
+		if (send_remaining == new_send_remaining) {
+			smp_mb();
+			break;
+		}
+		WARN_ON(send_remaining < new_send_remaining);
+		ret = atomic_sub_return(send_remaining - new_send_remaining,
+				&service_info->outstanding_frees);
+		send_remaining = new_send_remaining;
+		dev_dbg(transport->axon_dev, "failed to zero send quota, now %d outstanding (%d send)\n",
+				ret, send_remaining);
+	}
+
+	/* Repeat the above for the recv inflight count. */
+	while (recv_remaining > 0) {
+		unsigned new_recv_remaining = atomic_cmpxchg(
+				&service_info->recv_inflight, recv_remaining,
+				0);
+		if (recv_remaining == new_recv_remaining) {
+			smp_mb();
+			break;
+		}
+		WARN_ON(recv_remaining < new_recv_remaining);
+		ret = atomic_sub_return(recv_remaining - new_recv_remaining,
+				&service_info->outstanding_frees);
+		recv_remaining = new_recv_remaining;
+		dev_dbg(transport->axon_dev, "failed to zero recv quota, now %d outstanding (%d recv)\n",
+				ret, recv_remaining);
+	}
+
+	/* The outstanding frees count should never go negative */
+	WARN_ON(ret < 0);
+
+	/* Discard any outstanding freed buffer notifications. */
+	atomic_set(&service_info->recv_freed, 0);
+
+	/*
+	 * Wait for any previously queued free_bufs work to finish. This
+	 * guarantees that any freed buffer notifications that are already in
+	 * progress will be sent to the remote end before we return, and thus
+	 * before the reset is signalled.
+	 */
+	flush_delayed_work(&transport->free_bufs_work);
+
+	if (!ret)
+		transport_free_mbuf_pools(transport, service, service_info);
+
+	transport_put_service_info(service_info);
+
+	return ret;
+}
+
+static ssize_t transport_service_send_avail(struct vs_transport *_transport,
+		struct vs_service_device *service)
+{
+	struct vs_mv_service_info *service_info;
+	ssize_t count = 0;
+
+	service_info = service->transport_priv;
+	if (!service_info)
+		return -EINVAL;
+
+	__transport_get_service_info(service_info);
+
+	count = service->send_quota -
+		atomic_read(&service_info->send_inflight);
+
+	transport_put_service_info(service_info);
+
+	return count < 0 ? 0 : count;
+}
+
+static void transport_get_notify_bits(struct vs_transport *_transport,
+		unsigned *send_notify_bits, unsigned *recv_notify_bits)
+{
+	struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+
+	*send_notify_bits = transport->notify_tx_nirqs * BITS_PER_LONG;
+	*recv_notify_bits = transport->notify_rx_nirqs * BITS_PER_LONG;
+}
+
+static void transport_get_quota_limits(struct vs_transport *_transport,
+		unsigned *send_quota, unsigned *recv_quota)
+{
+	/*
+	 * This driver does not need to enforce a quota limit, because message
+	 * buffers are allocated from the kernel heap rather than a fixed
+	 * buffer area. The queue length only determines the maximum size of
+	 * a message batch, and the number of preallocated RX buffers.
+	 *
+	 * Note that per-service quotas are still enforced; there is simply no
+	 * hard limit on the total of all service quotas.
+	 */
+
+	*send_quota = UINT_MAX;
+	*recv_quota = UINT_MAX;
+}
+
+static const struct vs_transport_vtable tvt = {
+	.alloc_mbuf		= transport_alloc_mbuf,
+	.free_mbuf		= transport_free_mbuf,
+	.mbuf_size		= transport_mbuf_size,
+	.max_mbuf_size		= transport_max_mbuf_size,
+	.send			= transport_send,
+	.flush			= transport_flush,
+	.notify			= transport_notify,
+	.reset			= transport_reset,
+	.ready			= transport_ready,
+	.service_add		= transport_service_add,
+	.service_remove		= transport_service_remove,
+	.service_start		= transport_service_start,
+	.service_reset		= transport_service_reset,
+	.service_send_avail	= transport_service_send_avail,
+	.get_notify_bits	= transport_get_notify_bits,
+	.get_quota_limits	= transport_get_quota_limits,
+};
+
+/* Incoming notification handling for client */
+static irqreturn_t transport_axon_notify_virq(int irq, void *priv)
+{
+	struct vs_transport_axon *transport = (struct vs_transport_axon *)priv;
+	struct vs_notify_info *n_info;
+	unsigned long offset, bit = 0, notification;
+	int word;
+	okl4_virq_flags_t payload = okl4_get_virq_payload(irq);
+
+	for (word = 0; word < transport->notify_rx_nirqs; word++)
+		if (irq == transport->notify_irq[word])
+			break;
+
+	if (word == transport->notify_rx_nirqs) {
+		dev_err(transport->axon_dev, "Bad IRQ %d\n", irq);
+		return IRQ_NONE;
+	}
+
+	vs_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+			"Got notification irq\n");
+
+#if defined(__BIG_ENDIAN)
+	/*
+	 * We rely on being able to use the Linux bitmap operations directly
+	 * on the VIRQ payload.
+	 */
+	BUILD_BUG_ON((sizeof(payload) % sizeof(unsigned long)) != 0);
+#endif
+
+	for_each_set_bit(bit, (unsigned long *)&payload, sizeof(payload) * 8) {
+		offset = bit + word * BITS_PER_LONG;
+
+		/*
+		 * We need to know which service id is associated
+		 * with which notification bit here. The transport is informed
+		 * about the notification bit to service id mapping during the
+		 * initial handshake protocol.
+		 */
+		n_info = &transport->transport.notify_info[offset];
+
+		notification = 1UL << (offset - n_info->offset);
+		vs_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+				"Got notification bit %lu for service %d\n",
+				notification, n_info->service_id);
+
+		/* FIXME: Jira ticket SDK-2145 - shivanik. */
+		vs_session_handle_notify(transport->session_dev, notification,
+				n_info->service_id);
+	}
+
+	return IRQ_HANDLED;
+}
+
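+/*
+ * Reset/ready handshake handler.
+ *
+ * A RESET_REQ from the remote end tells us it has flushed its TX queue; we
+ * flush both queues, reply with RESET_ACK (clearing any stale READY in the
+ * same operation), and report the reset to the session layer. A RESET_ACK
+ * means the remote end has flushed in response to our own request, so only
+ * the RX queues need flushing here. A READY bit either records that the
+ * remote end is ready, or completes activation if we are already locally
+ * ready.
+ */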
+static irqreturn_t transport_axon_reset_irq(int irq, void *priv)
+{
+	struct vs_transport_axon *transport = (struct vs_transport_axon *)priv;
+	bool do_reset = false;
+
+	u32 payload = okl4_get_virq_payload(irq);
+
+	spin_lock(&transport->readiness_lock);
+
+	if (payload & VS_TRANSPORT_VIRQ_RESET_REQ) {
+		okl4_error_t err;
+
+		transport->readiness = VS_TRANSPORT_RESET;
+
+		/* Flush the queues in both directions */
+		transport_flush_tx_queues(transport);
+		transport_flush_rx_queues(transport);
+
+		/*
+		 * When sending an ack, it is important to cancel any earlier
+		 * ready notification, so the recipient can safely assume that
+		 * the ack precedes any ready it sees
+		 */
+		err = _okl4_sys_vinterrupt_modify(transport->reset_cap,
+				~VS_TRANSPORT_VIRQ_READY,
+				VS_TRANSPORT_VIRQ_RESET_ACK);
+		if (err != OKL4_OK) {
+			dev_warn(transport->axon_dev,
+					"Error sending reset ack: %d\n", (int)err);
+		}
+
+		/*
+		 * Discard any pending ready event; it must have happened
+		 * before the reset request was raised, because we had not
+		 * yet sent the reset ack.
+		 */
+		payload = 0;
+		do_reset = true;
+	} else if (payload & VS_TRANSPORT_VIRQ_RESET_ACK) {
+		transport->readiness = VS_TRANSPORT_RESET;
+
+		/*
+		 * Flush the RX queues, as we know at this point that the
+		 * other end has flushed its TX queues.
+		 */
+		transport_flush_rx_queues(transport);
+
+		/*
+		 * Preserve any pending ready event; it must have been
+		 * generated after the ack (see above)
+		 */
+		payload &= VS_TRANSPORT_VIRQ_READY;
+		do_reset = true;
+	}
+
+	if (do_reset) {
+		/*
+		 * Reset the session. Note that duplicate calls to this are
+		 * expected if there are duplicate resets; they don't
+		 * necessarily match activate calls.
+		 */
+		vs_session_handle_reset(transport->session_dev);
+	}
+
+	if (payload & VS_TRANSPORT_VIRQ_READY) {
+		if (transport->readiness == VS_TRANSPORT_RESET) {
+			transport->readiness = VS_TRANSPORT_REMOTE_READY;
+		} else if (transport->readiness == VS_TRANSPORT_LOCAL_READY) {
+			vs_session_handle_activate(transport->session_dev);
+			transport->readiness = VS_TRANSPORT_ACTIVE;
+		} else {
+			/* Ready lost a race with reset; ignore it. */
+		}
+	}
+
+	spin_unlock(&transport->readiness_lock);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Axon VIRQ handling.
+ */
+static irqreturn_t transport_axon_rx_irq(int irq, void *priv)
+{
+	struct vs_transport_axon *transport = (struct vs_transport_axon *)priv;
+
+	okl4_axon_virq_flags_t flags = okl4_get_virq_payload(irq);
+
+	if (okl4_axon_virq_flags_getfault(&flags)) {
+		dev_err_ratelimited(transport->axon_dev,
+				"fault on RX axon buffer or queue; resetting\n");
+		transport_axon_reset(transport);
+	} else if (okl4_axon_virq_flags_getready(&flags)) {
+		tasklet_schedule(&transport->rx_tasklet);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t transport_axon_tx_irq(int irq, void *priv)
+{
+	struct vs_transport_axon *transport = (struct vs_transport_axon *)priv;
+
+	okl4_axon_virq_flags_t flags = okl4_get_virq_payload(irq);
+
+	if (okl4_axon_virq_flags_getfault(&flags)) {
+		dev_err_ratelimited(transport->axon_dev,
+				"fault on TX axon buffer or queue; resetting\n");
+		transport_axon_reset(transport);
+	} else if (okl4_axon_virq_flags_getready(&flags)) {
+		spin_lock(&transport->readiness_lock);
+		if (!list_empty(&transport->tx_queue))
+			tasklet_schedule(&transport->tx_tasklet);
+		spin_unlock(&transport->readiness_lock);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void transport_rx_tasklet(unsigned long data)
+{
+	struct vs_transport_axon *transport = (struct vs_transport_axon *)data;
+	int status;
+	struct _okl4_sys_axon_process_recv_return recv_result;
+
+	/* Refill the RX queue */
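+	/*
+	 * transport_rx_queue_buffer() returns a negative errno if the axon
+	 * queue has no free descriptor (put the buffer back and stop), zero
+	 * if the buffer was queued but the queue is now full, and a positive
+	 * value if further buffers can still be queued.
+	 */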
+	spin_lock_irq(&transport->rx_alloc_lock);
+	while (!list_empty(&transport->rx_freelist)) {
+		struct vs_axon_rx_freelist_entry *buf;
+		buf = list_first_entry(&transport->rx_freelist,
+				struct vs_axon_rx_freelist_entry, list);
+		list_del(&buf->list);
+		status = transport_rx_queue_buffer(transport, buf, buf->laddr);
+		if (status < 0)
+			list_add(&buf->list, &transport->rx_freelist);
+		if (status <= 0)
+			break;
+	}
+	spin_unlock_irq(&transport->rx_alloc_lock);
+
+	/* Start the transfer */
+	recv_result = _okl4_sys_axon_process_recv(transport->rx_cap,
+			MAX_TRANSFER_CHUNK);
+
+	if (recv_result.error == OKL4_OK) {
+		status = 1;
+	} else {
+		status = okl4_error_to_errno(recv_result.error);
+		vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+				transport->axon_dev, "rx syscall fail: %d",
+				status);
+	}
+
+	/* Process the received messages */
+	while (status > 0)
+		status = transport_process_msg(transport);
+
+	if (status == -ENOMEM) {
+		/* Give kswapd some time to reclaim pages */
+		mod_timer(&transport->rx_retry_timer, jiffies + HZ);
+	} else if (status == -ENOBUFS) {
+		/*
+		 * Reschedule ourselves if more RX buffers are available,
+		 * otherwise do nothing until a buffer is freed
+		 */
+		spin_lock_irq(&transport->rx_alloc_lock);
+		if (!list_empty(&transport->rx_freelist))
+			tasklet_schedule(&transport->rx_tasklet);
+		spin_unlock_irq(&transport->rx_alloc_lock);
+	} else if (!status && !recv_result.send_empty) {
+		/* There are more messages waiting; reschedule */
+		tasklet_schedule(&transport->rx_tasklet);
+	} else if (status < 0 && status != -ECONNRESET) {
+		/* Something else went wrong, other than a reset */
+		dev_err(transport->axon_dev, "Fatal RX error %d\n", status);
+		transport_fatal_error(transport, "rx failure");
+	} else {
+		/* Axon is empty; wait for an RX interrupt */
+	}
+}
+
+static void transport_tx_tasklet(unsigned long data)
+{
+	struct vs_transport_axon *transport = (struct vs_transport_axon *)data;
+	struct vs_mbuf_axon *mbuf;
+	vs_service_id_t service_id;
+	int err;
+
+	spin_lock_irq(&transport->readiness_lock);
+
+	/* Check to see if there is anything in the queue to send */
+	if (list_empty(&transport->tx_queue)) {
+		/*
+		 * Queue is empty, probably because a service reset cancelled
+		 * some pending messages. Nothing to do.
+		 */
+		spin_unlock_irq(&transport->readiness_lock);
+		return;
+	}
+
+	/*
+	 * Try to send the mbuf. If it can't be sent, the channel must be
+	 * full again, so wait until the next can-send event.
+	 */
+	mbuf = list_first_entry(&transport->tx_queue, struct vs_mbuf_axon,
+			base.queue);
+
+	service_id = transport_get_mbuf_service_id(transport,
+			mbuf_real_base(mbuf), NULL);
+
+	err = __transport_send(transport, mbuf, service_id,
+			VS_TRANSPORT_SEND_FLAGS_MORE);
+	if (err == -ENOSPC) {
+		/*
+		 * The channel is currently full. Leave the message in the
+		 * queue and try again when it has emptied.
+		 */
+		__transport_flush(transport);
+		goto out_unlock;
+	}
+	if (err) {
+		/*
+		 * We cannot properly handle a message send error here because
+		 * we have already returned success for the send to the service
+		 * driver when the message was queued. We don't want to leave
+		 * the message in the queue, since it could cause a DoS if the
+		 * error is persistent. Give up and force a transport reset.
+		 */
+		dev_err(transport->axon_dev,
+				"Failed to send queued mbuf: %d\n", err);
+		spin_unlock_irq(&transport->readiness_lock);
+		transport_fatal_error(transport, "queued send failure");
+		return;
+	}
+
+	/* Message sent, remove it from the queue and free the local copy */
+	list_del(&mbuf->base.queue);
+	transport_free_sent_mbuf(transport, mbuf);
+
+	/* Check to see if we have run out of messages to send */
+	if (list_empty(&transport->tx_queue)) {
+		/* Nothing left in the queue; flush and return */
+		__transport_flush(transport);
+	} else {
+		/* Reschedule to send the next message */
+		tasklet_schedule(&transport->tx_tasklet);
+	}
+
+out_unlock:
+	spin_unlock_irq(&transport->readiness_lock);
+}
+
+static void transport_rx_retry_timer(struct timer_list *t)
+{
+	struct vs_transport_axon *transport = from_timer(transport, t,
+			rx_retry_timer);
+
+	/* Try to receive again; hopefully we have memory now */
+	tasklet_schedule(&transport->rx_tasklet);
+}
+
+/* Transport device management */
+
+static int alloc_notify_info(struct device *dev, struct vs_notify_info **info,
+		int *info_size, int virqs)
+{
+	/* Each VIRQ can handle BITS_PER_LONG notifications */
+	*info_size = sizeof(struct vs_notify_info) * (virqs * BITS_PER_LONG);
+	*info = devm_kzalloc(dev, *info_size, GFP_KERNEL);
+	if (!(*info))
+		return -ENOMEM;
+
+	return 0;
+}
+
+static int transport_axon_probe_virqs(struct vs_transport_axon *transport)
+{
+	struct device *device = transport->axon_dev;
+	struct device_node *axon_node = device->of_node;
+	struct device_node *vs_node = transport->of_node;
+	struct irq_data *irqd;
+	struct property *irqlines;
+	int ret, num_virq_lines;
+	struct device_node *virq_node = NULL;
+	u32 cap;
+	int i, irq_count;
+
+	if (of_irq_count(axon_node) < 2) {
+		dev_err(device, "Missing axon interrupts\n");
+		return -ENODEV;
+	}
+
+	irq_count = of_irq_count(vs_node);
+	if (irq_count < 1) {
+		dev_err(device, "Missing reset interrupt\n");
+		return -ENODEV;
+	} else if (irq_count > 1 + MAX_NOTIFICATION_LINES) {
+		dev_warn(device,
+			"Too many notification interrupts; only the first %d will be used\n",
+			MAX_NOTIFICATION_LINES);
+	}
+
+	/* Find the TX and RX axon IRQs and the reset IRQ */
+	transport->tx_irq = irq_of_parse_and_map(axon_node, 0);
+	if (!transport->tx_irq) {
+		dev_err(device, "No TX IRQ\n");
+		return -ENODEV;
+	}
+
+	transport->rx_irq = irq_of_parse_and_map(axon_node, 1);
+	if (!transport->rx_irq) {
+		dev_err(device, "No RX IRQ\n");
+		return -ENODEV;
+	}
+
+	transport->reset_irq = irq_of_parse_and_map(vs_node, 0);
+	if (!transport->reset_irq) {
+		dev_err(device, "No reset IRQ\n");
+		return -ENODEV;
+	}
+	irqd = irq_get_irq_data(transport->reset_irq);
+	if (!irqd) {
+		dev_err(device, "No reset IRQ data\n");
+		return -ENODEV;
+	}
+	transport->reset_okl4_irq = irqd_to_hwirq(irqd);
+
+	/* Find the notification IRQs */
+	transport->notify_rx_nirqs = min_t(int, irq_count - 1,
+			MAX_NOTIFICATION_LINES);
+	for (i = 0; i < transport->notify_rx_nirqs; i++) {
+		transport->notify_irq[i] = irq_of_parse_and_map(vs_node,
+				i + 1);
+		if (!transport->notify_irq[i]) {
+			dev_err(device, "Bad notify IRQ\n");
+			return -ENODEV;
+		}
+	}
+
+	/* Find all outgoing virq lines */
+	irqlines = of_find_property(vs_node, "okl,interrupt-lines", NULL);
+	if (!irqlines || irqlines->length < sizeof(u32)) {
+		dev_err(device, "No VIRQ sources found\n");
+		return -ENODEV;
+	}
+	num_virq_lines = irqlines->length / sizeof(u32);
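+	/*
+	 * The first entry in okl,interrupt-lines is the reset/ready virqline;
+	 * any remaining entries are outgoing notification lines.
+	 */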
+
+	virq_node = of_parse_phandle(vs_node, "okl,interrupt-lines", 0);
+	if (!virq_node) {
+		dev_err(device, "No reset VIRQ line object\n");
+		return -ENODEV;
+	}
+	ret = of_property_read_u32(virq_node, "reg", &cap);
+	if (ret || cap == OKL4_KCAP_INVALID) {
+		dev_err(device, "Bad reset VIRQ line\n");
+		return -ENODEV;
+	}
+	transport->reset_cap = cap;
+
+	transport->notify_tx_nirqs = num_virq_lines - 1;
+	for (i = 0; i < transport->notify_tx_nirqs; i++) {
+		virq_node = of_parse_phandle(vs_node, "okl,interrupt-lines",
+				i + 1);
+		if (!virq_node) {
+			dev_err(device, "No notify VIRQ line object\n");
+			return -ENODEV;
+		}
+		ret = of_property_read_u32(virq_node, "reg", &cap);
+		if (ret || cap == OKL4_KCAP_INVALID) {
+			dev_err(device, "Bad notify VIRQ line\n");
+			return -ENODEV;
+		}
+		transport->notify_cap[i] = cap;
+	}
+
+	return 0;
+}
+
+static int transport_axon_request_irqs(struct vs_transport_axon *transport)
+{
+	struct device *device = transport->axon_dev;
+	int i, ret;
+
+	ret = devm_request_irq(device, transport->reset_irq,
+			transport_axon_reset_irq, IRQF_TRIGGER_HIGH,
+			dev_name(transport->axon_dev), transport);
+	if (ret < 0)
+		return ret;
+
+	ret = devm_request_irq(device, transport->tx_irq,
+			transport_axon_tx_irq, IRQF_TRIGGER_HIGH,
+			dev_name(transport->axon_dev), transport);
+	if (ret < 0)
+		return ret;
+
+	ret = devm_request_irq(device, transport->rx_irq,
+			transport_axon_rx_irq, IRQF_TRIGGER_HIGH,
+			dev_name(transport->axon_dev), transport);
+	if (ret < 0)
+		return ret;
+
+	for (i = 0; i < transport->notify_rx_nirqs; i++) {
+		ret = devm_request_irq(device, transport->notify_irq[i],
+				transport_axon_notify_virq, IRQF_TRIGGER_HIGH,
+				dev_name(transport->axon_dev), transport);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int transport_axon_setup_descs(struct vs_transport_axon *transport)
+{
+	const int rx_buffer_order = ilog2(transport->msg_size +
+			sizeof(vs_service_id_t));
+	const size_t rx_queue_size = sizeof(*transport->rx) +
+		(sizeof(*transport->rx_descs) * transport->queue_size) +
+		(sizeof(*transport->rx_ptrs) * transport->queue_size);
+	const size_t tx_queue_size = sizeof(*transport->tx) +
+		(sizeof(*transport->tx_descs) * transport->queue_size);
+	const size_t queue_size = ALIGN(rx_queue_size,
+			__alignof__(*transport->tx)) + tx_queue_size;
+
+	struct _okl4_sys_mmu_lookup_pn_return lookup_return;
+	void *queue;
+	struct device_node *seg_node;
+	u32 seg_index;
+	okl4_kcap_t seg_cap;
+	okl4_error_t err;
+	dma_addr_t dma_handle;
+	const __be32 *prop;
+	int len, ret;
+
+	/*
+	 * Allocate memory for the queue descriptors.
+	 *
+	 * We allocate one block for both rx and tx because the minimum
+	 * allocation from dmam_alloc_coherent is usually a whole page.
+	 */
+	ret = -ENOMEM;
+	queue = dmam_alloc_coherent(transport->axon_dev, queue_size,
+			&dma_handle, GFP_KERNEL);
+	if (queue == NULL) {
+		dev_err(transport->axon_dev, "Failed to allocate %zd bytes for queue descriptors\n",
+				queue_size);
+		goto fail_alloc_dma;
+	}
+	memset(queue, 0, queue_size);
+
+	/*
+	 * Find the OKL4 physical segment object to attach to the axons.
+	 *
+	 * If the device has a CMA area, and the cell's memory segments have
+	 * not been split unnecessarily, then all allocations through the DMA
+	 * API for this device will be within a single segment. So, we can
+	 * simply look up the segment that contains the queue.
+	 *
+	 * The location and size of the CMA area can be configured elsewhere.
+	 * In 3.12 and later a device-specific area can be reserved via the
+	 * standard device tree reserved-memory properties. Otherwise, the
+	 * global area will be used, which has a size configurable on the
+	 * kernel command line and defaults to 16MB.
+	 */
+
+	/* Locate the physical segment */
+	ret = -ENODEV;
+	lookup_return = _okl4_sys_mmu_lookup_pn(okl4_mmu_cap,
+			dma_handle >> OKL4_DEFAULT_PAGEBITS, -1);
+	err = okl4_mmu_lookup_index_geterror(&lookup_return.segment_index);
+	if (err == OKL4_ERROR_NOT_IN_SEGMENT) {
+		dev_err(transport->axon_dev,
+				"No segment found for DMA address %pK (%#llx)!\n",
+				queue, (unsigned long long)dma_handle);
+		goto fail_lookup_segment;
+	}
+	if (err != OKL4_OK) {
+		dev_err(transport->axon_dev,
+				"Could not look up segment for DMA address %pK (%#llx): OKL4 error %d\n",
+				queue, (unsigned long long)dma_handle,
+				(int)err);
+		goto fail_lookup_segment;
+	}
+	seg_index = okl4_mmu_lookup_index_getindex(&lookup_return.segment_index);
+
+	dev_dbg(transport->axon_dev, "lookup pn %#lx got error %ld segment %ld count %lu offset %#lx\n",
+			(long)(dma_handle >> OKL4_DEFAULT_PAGEBITS),
+			(long)err, (long)seg_index,
+			(unsigned long)lookup_return.count_pn,
+			(unsigned long)lookup_return.offset_pn);
+
+	/* Locate the physical segment's OF node */
+	for_each_compatible_node(seg_node, NULL, "okl,microvisor-segment") {
+		u32 attach_index;
+		ret = of_property_read_u32(seg_node, "okl,segment-attachment",
+				&attach_index);
+		if (!ret && attach_index == seg_index)
+			break;
+	}
+	if (seg_node == NULL) {
+		ret = -ENXIO;
+		dev_err(transport->axon_dev, "No physical segment found for %pK\n",
+				queue);
+		goto fail_lookup_segment;
+	}
+
+	/* Determine the physical segment's cap */
+	prop = of_get_property(seg_node, "reg", &len);
+	if (prop) {
+		seg_cap = of_read_number(prop, of_n_addr_cells(seg_node));
+		ret = (seg_cap == OKL4_KCAP_INVALID) ? -ENXIO : 0;
+	} else {
+		ret = -EPERM;
+	}
+	if (ret < 0) {
+		dev_err(transport->axon_dev, "missing physical-segment cap\n");
+		goto fail_lookup_segment;
+	}
+	transport->segment = seg_cap;
+	transport->segment_base =
+		(round_down(dma_handle >> OKL4_DEFAULT_PAGEBITS,
+			    lookup_return.count_pn) -
+		 lookup_return.offset_pn) << OKL4_DEFAULT_PAGEBITS;
+
+	dev_dbg(transport->axon_dev, "physical segment cap is %#lx, base %#llx\n",
+			(unsigned long)transport->segment,
+			(unsigned long long)transport->segment_base);
+
+	/* Attach the segment to the Axon endpoints */
+	err = _okl4_sys_axon_set_send_segment(transport->tx_cap,
+			transport->segment, transport->segment_base);
+	if (err != OKL4_OK) {
+		dev_err(transport->axon_dev, "TX attach failed: %d\n",
+				(int)err);
+		ret = okl4_error_to_errno(err);
+		goto fail_attach;
+	}
+
+	err = _okl4_sys_axon_set_recv_segment(transport->rx_cap,
+			transport->segment, transport->segment_base);
+	if (err != OKL4_OK) {
+		dev_err(transport->axon_dev, "RX attach failed: %d\n",
+				(int)err);
+		ret = okl4_error_to_errno(err);
+		goto fail_attach;
+	}
+
+	/* Array of pointers to the source TX pool for each outgoing buffer. */
+	transport->tx_pools = devm_kzalloc(transport->axon_dev,
+			sizeof(*transport->tx_pools) * transport->queue_size,
+			GFP_KERNEL);
+	if (!transport->tx_pools) {
+		ret = -ENOMEM;
+		goto fail_alloc_tx_pools;
+	}
+
+	/* Set up the rx queue descriptors. */
+	transport->rx = queue;
+	transport->rx_phys = dma_handle;
+	transport->rx_size = rx_queue_size;
+	transport->rx_descs = (void *)(transport->rx + 1);
+	transport->rx_ptrs = (void *)(transport->rx_descs + transport->queue_size);
+	okl4_axon_queue_size_setallocorder(&transport->rx->queue_sizes[0],
+			rx_buffer_order);
+	transport->rx->queues[0].queue_offset = sizeof(*transport->rx);
+	transport->rx->queues[0].entries = transport->queue_size;
+	transport->rx->queues[0].uptr = 0;
+	transport->rx->queues[0].kptr = 0;
+	transport->rx_uptr_allocated = 0;
+
+	/* Set up the tx queue descriptors. */
+	transport->tx = queue + ALIGN(rx_queue_size,
+			__alignof__(*transport->tx));
+	transport->tx_phys = dma_handle + ((void *)transport->tx - queue);
+	transport->tx_size = tx_queue_size;
+	transport->tx_descs = (void *)(transport->tx + 1);
+	transport->tx->queues[0].queue_offset = sizeof(*transport->tx);
+	transport->tx->queues[0].entries = transport->queue_size;
+	transport->tx->queues[0].uptr = 0;
+	transport->tx->queues[0].kptr = 0;
+	transport->tx_uptr_freed = 0;
+
+	/* Create a DMA pool for the RX buffers. */
+	transport->rx_pool = dmam_pool_create("vs_axon_rx_pool",
+			transport->axon_dev, 1 << rx_buffer_order,
+			max(dma_get_cache_alignment(),
+				1 << OKL4_PRESHIFT_LADDR_AXON_DATA_INFO), 0);
+	if (!transport->rx_pool) {
+		ret = -ENOMEM;
+		goto fail_alloc_tx_pools;
+	}
+
+	return 0;
+
+fail_alloc_tx_pools:
+fail_attach:
+fail_lookup_segment:
+	dmam_free_coherent(transport->axon_dev, queue_size, queue, dma_handle);
+fail_alloc_dma:
+	return ret;
+}
+
+static void transport_axon_free_descs(struct vs_transport_axon *transport)
+{
+	int i;
+
+	tasklet_disable(&transport->rx_tasklet);
+	tasklet_kill(&transport->rx_tasklet);
+
+	tasklet_disable(&transport->tx_tasklet);
+	tasklet_kill(&transport->tx_tasklet);
+
+	cancel_delayed_work_sync(&transport->free_bufs_work);
+
+	transport->tx = NULL;
+	transport->tx_descs = NULL;
+
+	for (i = 0; i < transport->rx->queues[0].entries; i++) {
+		struct okl4_axon_queue_entry *desc = &transport->rx_descs[i];
+
+		if (okl4_axon_data_info_getusr(&desc->info)) {
+			void *ptr = transport->rx_ptrs[i];
+			dma_addr_t dma = okl4_axon_data_info_getladdr(&desc->info);
+			dma_pool_free(transport->rx_pool, ptr, dma);
+		}
+	}
+
+	transport->rx = NULL;
+	transport->rx_descs = NULL;
+	transport->rx_ptrs = NULL;
+
+	/* Let devm free the queues so we don't have to keep the dma handle */
+}
+
+static int transport_axon_probe(struct platform_device *dev)
+{
+	struct vs_transport_axon *priv = NULL;
+	u32 cap[2];
+	u32 queue_size, msg_size;
+	int ret, i;
+	const char *name;
+
+	if (!dev_get_cma_area(&dev->dev) && !okl4_single_physical_segment) {
+		dev_err(&dev->dev, "Multiple physical segments, but CMA is disabled\n");
+		return -ENOSYS;
+	}
+
+	dev->dev.coherent_dma_mask = ~(u64)0;
+	dev->dev.dma_ops = &axon_dma_ops;
+
+	priv = devm_kzalloc(&dev->dev, sizeof(struct vs_transport_axon) +
+			sizeof(unsigned long), GFP_KERNEL);
+	if (priv == NULL) {
+		dev_err(&dev->dev, "create transport object failed\n");
+		ret = -ENOMEM;
+		goto err_alloc_priv;
+	}
+	dev_set_drvdata(&dev->dev, priv);
+
+	priv->of_node = of_get_child_by_name(dev->dev.of_node,
+			"virtual-session");
+	if ((!priv->of_node) ||
+			(!of_device_is_compatible(priv->of_node,
+					"okl,virtual-session"))) {
+		dev_err(&dev->dev, "missing virtual-session node\n");
+		ret = -ENODEV;
+		goto error_of_node;
+	}
+
+	name = dev->dev.of_node->full_name;
+	of_property_read_string(dev->dev.of_node, "label", &name);
+
+	if (of_property_read_bool(priv->of_node, "okl,is-client")) {
+		priv->is_server = false;
+	} else if (of_property_read_bool(priv->of_node, "okl,is-server")) {
+		priv->is_server = true;
+	} else {
+		dev_err(&dev->dev, "virtual-session node is not marked as client or server\n");
+		ret = -ENODEV;
+		goto error_of_node;
+	}
+
+	priv->transport.vt = &tvt;
+	priv->transport.type = "microvisor";
+	priv->axon_dev = &dev->dev;
+
+	/* Read the Axon caps */
+	ret = of_property_read_u32_array(dev->dev.of_node, "reg", cap, 2);
+	if (ret < 0 || cap[0] == OKL4_KCAP_INVALID ||
+			cap[1] == OKL4_KCAP_INVALID) {
+		dev_err(&dev->dev, "missing axon endpoint caps\n");
+		ret = -ENODEV;
+		goto error_of_node;
+	}
+	priv->tx_cap = cap[0];
+	priv->rx_cap = cap[1];
+
+	/* Set transport properties; default to a 64KB buffer */
+	queue_size = 16;
+	(void)of_property_read_u32(priv->of_node, "okl,queue-length",
+			&queue_size);
+	priv->queue_size = max((size_t)queue_size, MIN_QUEUE_SIZE);
+
+	msg_size = PAGE_SIZE - sizeof(vs_service_id_t);
+	(void)of_property_read_u32(priv->of_node, "okl,message-size",
+			&msg_size);
+	priv->msg_size = max((size_t)msg_size, MIN_MSG_SIZE);
+
+	/*
+	 * Since the Axon API requires received message size limits to be
+	 * powers of two, we must round up the message size (including the
+	 * space reserved for the service ID).
+	 */
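+	/*
+	 * For example (assuming a 4-byte vs_service_id_t): a requested
+	 * msg_size of 4092 stays at 4092, since 4092 + 4 = 4096 is already a
+	 * power of two; a requested 5000 becomes 8188, since 5004 rounds up
+	 * to 8192.
+	 */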
+	priv->msg_size = roundup_pow_of_two(priv->msg_size +
+			sizeof(vs_service_id_t)) - sizeof(vs_service_id_t);
+	if (priv->msg_size != msg_size)
+		dev_info(&dev->dev, "message size rounded up from %zd to %zd\n",
+				(size_t)msg_size, priv->msg_size);
+
+	INIT_LIST_HEAD(&priv->tx_queue);
+
+	/* Initialise the activation state, tasklets, and RX retry timer */
+	spin_lock_init(&priv->readiness_lock);
+	priv->readiness = VS_TRANSPORT_INIT;
+
+	tasklet_init(&priv->rx_tasklet, transport_rx_tasklet,
+		(unsigned long)priv);
+	tasklet_init(&priv->tx_tasklet, transport_tx_tasklet,
+		(unsigned long)priv);
+
+	INIT_DELAYED_WORK(&priv->free_bufs_work, transport_free_bufs_work);
+	spin_lock_init(&priv->rx_alloc_lock);
+	priv->rx_alloc_extra = 0;
+	INIT_LIST_HEAD(&priv->rx_freelist);
+
+	timer_setup(&priv->rx_retry_timer, transport_rx_retry_timer, 0);
+
+	/* Keep RX disabled until the core service is ready. */
+	tasklet_disable(&priv->rx_tasklet);
+
+	ret = transport_axon_probe_virqs(priv);
+	if (ret < 0)
+		goto err_probe_virqs;
+
+	if (priv->notify_rx_nirqs) {
+		ret = alloc_notify_info(&dev->dev, &priv->transport.notify_info,
+				&priv->transport.notify_info_size,
+				priv->notify_rx_nirqs);
+		if (ret < 0) {
+			dev_err(&dev->dev, "Alloc notify_info failed\n");
+			goto err_alloc_notify;
+		}
+	} else {
+		priv->transport.notify_info = NULL;
+		priv->transport.notify_info_size = 0;
+	}
+
+	priv->free_bufs_pool = transport_axon_init_tx_pool(priv, priv->msg_size,
+			FREE_BUFS_QUOTA);
+	if (IS_ERR(priv->free_bufs_pool)) {
+		ret = PTR_ERR(priv->free_bufs_pool);
+		goto err_init_free_bufs_pool;
+	}
+
+	ret = transport_axon_setup_descs(priv);
+	if (ret < 0)
+		goto err_setup_descs;
+
+	/* Allocate RX buffers for free bufs messages */
+	for (i = 0; i < FREE_BUFS_QUOTA; i++) {
+		dma_addr_t laddr;
+		struct vs_axon_rx_freelist_entry *buf =
+			dma_pool_alloc(priv->rx_pool, GFP_KERNEL, &laddr);
+		if (!buf) {
+			ret = -ENOMEM;
+			goto err_alloc_rx_free_bufs;
+		}
+		buf->laddr = laddr;
+
+		spin_lock_irq(&priv->rx_alloc_lock);
+		list_add_tail(&buf->list, &priv->rx_freelist);
+		spin_unlock_irq(&priv->rx_alloc_lock);
+	}
+
+	/* Set up the session device */
+	priv->session_dev = vs_session_register(&priv->transport, &dev->dev,
+			priv->is_server, name);
+	if (IS_ERR(priv->session_dev)) {
+		ret = PTR_ERR(priv->session_dev);
+		dev_err(&dev->dev, "failed to register session: %d\n", ret);
+		goto err_session_register;
+	}
+
+	/*
+	 * Start the core service. Note that it can't actually communicate
+	 * until the initial reset completes.
+	 */
+	vs_session_start(priv->session_dev);
+
+	/*
+	 * Reset the transport. This will also set the Axons' segment
+	 * attachments, and eventually the Axons' queue pointers (once the
+	 * session marks the transport ready).
+	 */
+	transport_reset(&priv->transport);
+
+	/*
+	 * We're ready to start handling IRQs at this point, so register the
+	 * handlers.
+	 */
+	ret = transport_axon_request_irqs(priv);
+	if (ret < 0)
+		goto err_irq_register;
+
+	return 0;
+
+err_irq_register:
+	vs_session_unregister(priv->session_dev);
+err_session_register:
+err_alloc_rx_free_bufs:
+	transport_axon_free_descs(priv);
+err_setup_descs:
+	transport_axon_put_tx_pool(priv->free_bufs_pool);
+err_init_free_bufs_pool:
+	if (priv->transport.notify_info)
+		devm_kfree(&dev->dev, priv->transport.notify_info);
+err_alloc_notify:
+err_probe_virqs:
+	del_timer_sync(&priv->rx_retry_timer);
+	tasklet_kill(&priv->rx_tasklet);
+	tasklet_kill(&priv->tx_tasklet);
+	cancel_delayed_work_sync(&priv->free_bufs_work);
+error_of_node:
+	devm_kfree(&dev->dev, priv);
+err_alloc_priv:
+	return ret;
+}
+
+static int transport_axon_remove(struct platform_device *dev)
+{
+	struct vs_transport_axon *priv = dev_get_drvdata(&dev->dev);
+	int i;
+
+	for (i = 0; i < priv->notify_rx_nirqs; i++)
+		devm_free_irq(&dev->dev, priv->notify_irq[i], priv);
+
+	devm_free_irq(&dev->dev, priv->rx_irq, priv);
+	irq_dispose_mapping(priv->rx_irq);
+	devm_free_irq(&dev->dev, priv->tx_irq, priv);
+	irq_dispose_mapping(priv->tx_irq);
+	devm_free_irq(&dev->dev, priv->reset_irq, priv);
+	irq_dispose_mapping(priv->reset_irq);
+
+	del_timer_sync(&priv->rx_retry_timer);
+	tasklet_kill(&priv->rx_tasklet);
+	tasklet_kill(&priv->tx_tasklet);
+	cancel_delayed_work_sync(&priv->free_bufs_work);
+
+	priv->readiness = VS_TRANSPORT_SHUTDOWN;
+	vs_session_unregister(priv->session_dev);
+	WARN_ON(priv->readiness != VS_TRANSPORT_SHUTDOWN);
+
+	transport_axon_free_descs(priv);
+	transport_axon_put_tx_pool(priv->free_bufs_pool);
+
+	if (priv->transport.notify_info)
+		devm_kfree(&dev->dev, priv->transport.notify_info);
+
+	free_tx_mbufs(priv);
+
+	flush_workqueue(work_queue);
+
+	while (!list_empty(&priv->rx_freelist)) {
+		struct vs_axon_rx_freelist_entry *buf;
+		buf = list_first_entry(&priv->rx_freelist,
+				struct vs_axon_rx_freelist_entry, list);
+		list_del(&buf->list);
+		dma_pool_free(priv->rx_pool, buf, buf->laddr);
+	}
+
+	devm_kfree(&dev->dev, priv);
+	return 0;
+}
+
+static const struct of_device_id transport_axon_of_match[] = {
+	{ .compatible = "okl,microvisor-axon-transport", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, transport_axon_of_match);
+
+static struct platform_driver transport_axon_driver = {
+	.probe		= transport_axon_probe,
+	.remove		= transport_axon_remove,
+	.driver = {
+		.name		= DRIVER_NAME,
+		.bus		= &platform_bus_type,
+		.of_match_table = of_match_ptr(transport_axon_of_match),
+	},
+};
+
+static int __init vs_transport_axon_init(void)
+{
+	int ret;
+	okl4_error_t err;
+	struct device_node *cpus;
+	struct zone *zone;
+	struct _okl4_sys_mmu_lookup_pn_return lookup_return;
+	u32 last_seen_attachment = -1;
+	bool first_attachment;
+
+	printk(KERN_INFO "Virtual Services transport driver for OKL4 Axons\n");
+
+	/* Allocate the Axon cleanup workqueue */
+	work_queue = alloc_workqueue("axon_cleanup", 0, 0);
+	if (!work_queue) {
+		ret = -ENOMEM;
+		goto fail_create_workqueue;
+	}
+
+	/* Locate the MMU capability, needed for lookups */
+	cpus = of_find_node_by_path("/cpus");
+	if (IS_ERR_OR_NULL(cpus)) {
+		ret = -EINVAL;
+		goto fail_mmu_cap;
+	}
+	ret = of_property_read_u32(cpus, "okl,vmmu-capability", &okl4_mmu_cap);
+	of_node_put(cpus);
+	if (ret)
+		goto fail_mmu_cap;
+	if (okl4_mmu_cap == OKL4_KCAP_INVALID) {
+		printk(KERN_ERR "%s: OKL4 MMU capability not found\n", __func__);
+		ret = -EPERM;
+		goto fail_mmu_cap;
+	}
+
+	/*
+	 * Determine whether there are multiple OKL4 physical memory segments
+	 * in this Cell. If so, every transport device must have a valid CMA
+	 * region, to guarantee that its buffer allocations all come from the
+	 * segment that is attached to the axon endpoints.
+	 *
+	 * We assume that each zone is contiguously mapped in stage 2 with a
+	 * constant physical-to-IPA offset, typically 0. The weaver won't
+	 * violate this assumption for Linux (or other HLOS) guests unless it
+	 * is explicitly told to.
+	 */
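+	/*
+	 * Concretely, the loop below looks up the segment attachment for the
+	 * first and last page of every zone the page allocator manages and
+	 * requires all of them to match; any lookup failure or mismatch is
+	 * treated as "more than one segment".
+	 */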
+	okl4_single_physical_segment = true;
+	first_attachment = true;
+	for_each_zone(zone) {
+		u32 attachment;
+
+		/* We only care about zones that the page allocator is using */
+		if (!zone->managed_pages)
+			continue;
+
+		/* Find the segment at the start of the zone */
+		lookup_return = _okl4_sys_mmu_lookup_pn(okl4_mmu_cap,
+				zone->zone_start_pfn, -1);
+		err = okl4_mmu_lookup_index_geterror(
+				&lookup_return.segment_index);
+		if (err != OKL4_OK) {
+			printk(KERN_WARNING "%s: Unable to determine physical segment count, assuming >1\n",
+					__func__);
+			okl4_single_physical_segment = false;
+			break;
+		}
+		attachment = okl4_mmu_lookup_index_getindex(
+				&lookup_return.segment_index);
+
+		if (first_attachment) {
+			last_seen_attachment = attachment;
+			first_attachment = false;
+		} else if (last_seen_attachment != attachment) {
+			okl4_single_physical_segment = false;
+			break;
+		}
+
+		/* Find the segment at the end of the zone */
+		lookup_return = _okl4_sys_mmu_lookup_pn(okl4_mmu_cap,
+				zone_end_pfn(zone) - 1, -1);
+		err = okl4_mmu_lookup_index_geterror(
+				&lookup_return.segment_index);
+		if (err != OKL4_OK) {
+			printk(KERN_WARNING "%s: Unable to determine physical segment count, assuming >1\n",
+					__func__);
+			okl4_single_physical_segment = false;
+			break;
+		}
+		attachment = okl4_mmu_lookup_index_getindex(
+				&lookup_return.segment_index);
+
+		/* Check that it's still the same segment */
+		if (last_seen_attachment != attachment) {
+			okl4_single_physical_segment = false;
+			break;
+		}
+	}
+
+#ifdef DEBUG
+	printk(KERN_DEBUG "%s: physical segment count %s\n", __func__,
+			okl4_single_physical_segment ? "1" : ">1");
+#endif
+
+	mbuf_cache = KMEM_CACHE(vs_mbuf_axon, 0UL);
+	if (!mbuf_cache) {
+		ret = -ENOMEM;
+		goto kmem_cache_failed;
+	}
+
+	ret = platform_driver_register(&transport_axon_driver);
+	if (ret)
+		goto register_plat_driver_failed;
+
+	return ret;
+
+register_plat_driver_failed:
+	kmem_cache_destroy(mbuf_cache);
+	mbuf_cache = NULL;
+kmem_cache_failed:
+fail_mmu_cap:
+	if (work_queue)
+		destroy_workqueue(work_queue);
+fail_create_workqueue:
+	return ret;
+}
+
+static void __exit vs_transport_axon_exit(void)
+{
+	platform_driver_unregister(&transport_axon_driver);
+
+	rcu_barrier();
+
+	kmem_cache_destroy(mbuf_cache);
+	mbuf_cache = NULL;
+
+	if (work_queue)
+		destroy_workqueue(work_queue);
+}
+
+module_init(vs_transport_axon_init);
+module_exit(vs_transport_axon_exit);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR(DRIVER_AUTHOR);
diff --git a/include/Kbuild b/include/Kbuild
new file mode 100644
index 0000000..9205b04
--- /dev/null
+++ b/include/Kbuild
@@ -0,0 +1,6 @@
+# Top-level Makefile calls into asm-$(ARCH)
+# List only non-arch directories below
+
+ifneq ($(VSERVICES_SUPPORT), "")
+header-y += vservices/
+endif
diff --git a/include/asm-generic/okl4_virq.h b/include/asm-generic/okl4_virq.h
new file mode 100644
index 0000000..2eca110
--- /dev/null
+++ b/include/asm-generic/okl4_virq.h
@@ -0,0 +1,27 @@
+/*
+ * include/asm-generic/okl4_virq.h
+ *
+ * Copyright (c) 2017 General Dynamics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __OKL4_VIRQ_H__
+#define __OKL4_VIRQ_H__
+
+#include <linux/irq.h>
+#include <microvisor/microvisor.h>
+
+static inline okl4_virq_flags_t okl4_get_virq_payload(unsigned int irq)
+{
+	struct irq_data *irqd = irq_get_irq_data(irq);
+
+	if (WARN_ON_ONCE(!irqd))
+		return 0;
+
+	return _okl4_sys_interrupt_get_payload(irqd_to_hwirq(irqd)).payload;
+}
+
+#endif
diff --git a/include/crypto/ice.h b/include/crypto/ice.h
new file mode 100644
index 0000000..a9867cf
--- /dev/null
+++ b/include/crypto/ice.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _QCOM_INLINE_CRYPTO_ENGINE_H_
+#define _QCOM_INLINE_CRYPTO_ENGINE_H_
+
+#include <linux/platform_device.h>
+
+struct request;
+
+enum ice_cryto_algo_mode {
+	ICE_CRYPTO_ALGO_MODE_AES_ECB = 0x0,
+	ICE_CRYPTO_ALGO_MODE_AES_XTS = 0x3,
+};
+
+enum ice_crpto_key_size {
+	ICE_CRYPTO_KEY_SIZE_128 = 0x0,
+	ICE_CRYPTO_KEY_SIZE_256 = 0x2,
+};
+
+enum ice_crpto_key_mode {
+	ICE_CRYPTO_USE_KEY0_HW_KEY = 0x0,
+	ICE_CRYPTO_USE_KEY1_HW_KEY = 0x1,
+	ICE_CRYPTO_USE_LUT_SW_KEY0 = 0x2,
+	ICE_CRYPTO_USE_LUT_SW_KEY  = 0x3
+};
+
+struct ice_crypto_setting {
+	enum ice_crpto_key_size		key_size;
+	enum ice_cryto_algo_mode	algo_mode;
+	enum ice_crpto_key_mode		key_mode;
+	short				key_index;
+
+};
+
+struct ice_data_setting {
+	struct ice_crypto_setting	crypto_data;
+	bool				sw_forced_context_switch;
+	bool				decr_bypass;
+	bool				encr_bypass;
+};
+
+/* MSM ICE crypto data-unit sizes for the target DUN of a transfer request */
+enum ice_crypto_data_unit {
+	ICE_CRYPTO_DATA_UNIT_512_B          = 0,
+	ICE_CRYPTO_DATA_UNIT_1_KB           = 1,
+	ICE_CRYPTO_DATA_UNIT_2_KB           = 2,
+	ICE_CRYPTO_DATA_UNIT_4_KB           = 3,
+	ICE_CRYPTO_DATA_UNIT_8_KB           = 4,
+	ICE_CRYPTO_DATA_UNIT_16_KB          = 5,
+	ICE_CRYPTO_DATA_UNIT_32_KB          = 6,
+	ICE_CRYPTO_DATA_UNIT_64_KB          = 7,
+};
+
+typedef void (*ice_error_cb)(void *, u32 error);
+
+struct qcom_ice_variant_ops *qcom_ice_get_variant_ops(struct device_node *node);
+struct platform_device *qcom_ice_get_pdevice(struct device_node *node);
+
+#ifdef CONFIG_CRYPTO_DEV_QCOM_ICE
+int qcom_ice_setup_ice_hw(const char *storage_type, int enable);
+void qcom_ice_set_fde_flag(int flag);
+#else
+static inline int qcom_ice_setup_ice_hw(const char *storage_type, int enable)
+{
+	return 0;
+}
+static inline void qcom_ice_set_fde_flag(int flag) {}
+#endif
+
+struct qcom_ice_variant_ops {
+	const char *name;
+	int	(*init)(struct platform_device *device_init, void *init_data,
+				ice_error_cb err);
+	int	(*reset)(struct platform_device *device_reset);
+	int	(*resume)(struct platform_device *device_resume);
+	int	(*suspend)(struct platform_device *device_suspend);
+	int	(*config_start)(struct platform_device *device_start,
+			struct request *req, struct ice_data_setting *setting,
+			bool start);
+	int	(*config_end)(struct request *req);
+	int	(*status)(struct platform_device *device_status);
+	void	(*debug)(struct platform_device *device_debug);
+};
+
+#endif /* _QCOM_INLINE_CRYPTO_ENGINE_H_ */
diff --git a/include/dt-bindings/clock/qcom,npucc-kona.h b/include/dt-bindings/clock/qcom,npucc-kona.h
index 2d9112d..9c3f3aed 100644
--- a/include/dt-bindings/clock/qcom,npucc-kona.h
+++ b/include/dt-bindings/clock/qcom,npucc-kona.h
@@ -4,57 +4,59 @@
 #ifndef _DT_BINDINGS_CLK_QCOM_NPU_CC_KONA_H
 #define _DT_BINDINGS_CLK_QCOM_NPU_CC_KONA_H
 
-#define NPU_CC_AON_CLK						0
-#define NPU_CC_ATB_CLK						1
-#define NPU_CC_BTO_CORE_CLK					2
-#define NPU_CC_BWMON_CLK					3
-#define NPU_CC_CAL_HM0_CDC_CLK					4
-#define NPU_CC_CAL_HM0_CLK					5
-#define NPU_CC_CAL_HM0_CLK_SRC					6
-#define NPU_CC_CAL_HM0_DPM_IP_CLK				7
-#define NPU_CC_CAL_HM0_PERF_CNT_CLK				8
-#define NPU_CC_CAL_HM1_CDC_CLK					9
-#define NPU_CC_CAL_HM1_CLK					10
-#define NPU_CC_CAL_HM1_CLK_SRC					11
-#define NPU_CC_CAL_HM1_DPM_IP_CLK				12
-#define NPU_CC_CAL_HM1_PERF_CNT_CLK				13
-#define NPU_CC_CORE_CLK						14
-#define NPU_CC_CORE_CLK_SRC					15
-#define NPU_CC_DL_DPM_CLK					16
-#define NPU_CC_DL_LLM_CLK					17
-#define NPU_CC_DPM_CLK						18
-#define NPU_CC_DPM_TEMP_CLK					19
-#define NPU_CC_DPM_XO_CLK					20
-#define NPU_CC_DSP_AHBM_CLK					21
-#define NPU_CC_DSP_AHBS_CLK					22
-#define NPU_CC_DSP_AXI_CLK					23
-#define NPU_CC_DSP_BWMON_AHB_CLK				24
-#define NPU_CC_DSP_BWMON_CLK					25
-#define NPU_CC_ISENSE_CLK					26
-#define NPU_CC_LLM_CLK						27
-#define NPU_CC_LLM_CURR_CLK					28
-#define NPU_CC_LLM_TEMP_CLK					29
-#define NPU_CC_LLM_XO_CLK					30
-#define NPU_CC_LMH_CLK_SRC					31
-#define NPU_CC_NOC_AHB_CLK					32
-#define NPU_CC_NOC_AXI_CLK					33
-#define NPU_CC_NOC_DMA_CLK					34
-#define NPU_CC_PLL0						35
-#define NPU_CC_PLL0_OUT_EVEN					36
-#define NPU_CC_PLL1						37
-#define NPU_CC_PLL1_OUT_EVEN					38
-#define NPU_CC_RSC_XO_CLK					39
-#define NPU_CC_S2P_CLK						40
-#define NPU_CC_XO_CLK						41
-#define NPU_CC_XO_CLK_SRC					42
-#define NPU_DSP_CORE_CLK_SRC					43
-#define NPU_Q6SS_PLL						44
+#define NPU_CC_ATB_CLK						0
+#define NPU_CC_BTO_CORE_CLK					1
+#define NPU_CC_BWMON_CLK					2
+#define NPU_CC_CAL_HM0_CDC_CLK					3
+#define NPU_CC_CAL_HM0_CLK					4
+#define NPU_CC_CAL_HM0_CLK_SRC					5
+#define NPU_CC_CAL_HM0_DPM_IP_CLK				6
+#define NPU_CC_CAL_HM0_PERF_CNT_CLK				7
+#define NPU_CC_CAL_HM1_CDC_CLK					8
+#define NPU_CC_CAL_HM1_CLK					9
+#define NPU_CC_CAL_HM1_CLK_SRC					10
+#define NPU_CC_CAL_HM1_DPM_IP_CLK				11
+#define NPU_CC_CAL_HM1_PERF_CNT_CLK				12
+#define NPU_CC_CORE_CLK						13
+#define NPU_CC_CORE_CLK_SRC					14
+#define NPU_CC_DL_DPM_CLK					15
+#define NPU_CC_DL_LLM_CLK					16
+#define NPU_CC_DPM_CLK						17
+#define NPU_CC_DPM_TEMP_CLK					18
+#define NPU_CC_DPM_XO_CLK					19
+#define NPU_CC_DSP_AHBM_CLK					20
+#define NPU_CC_DSP_AHBS_CLK					21
+#define NPU_CC_DSP_AXI_CLK					22
+#define NPU_CC_DSP_BWMON_AHB_CLK				23
+#define NPU_CC_DSP_BWMON_CLK					24
+#define NPU_CC_ISENSE_CLK					25
+#define NPU_CC_LLM_CLK						26
+#define NPU_CC_LLM_CURR_CLK					27
+#define NPU_CC_LLM_TEMP_CLK					28
+#define NPU_CC_LLM_XO_CLK					29
+#define NPU_CC_LMH_CLK_SRC					30
+#define NPU_CC_NOC_AHB_CLK					31
+#define NPU_CC_NOC_AXI_CLK					32
+#define NPU_CC_NOC_DMA_CLK					33
+#define NPU_CC_PLL0						34
+#define NPU_CC_PLL0_OUT_EVEN					35
+#define NPU_CC_PLL1						36
+#define NPU_CC_PLL1_OUT_EVEN					37
+#define NPU_CC_RSC_XO_CLK					38
+#define NPU_CC_S2P_CLK						39
+#define NPU_CC_XO_CLK						40
+#define NPU_CC_XO_CLK_SRC					41
+#define NPU_DSP_CORE_CLK_SRC					42
+#define NPU_Q6SS_PLL						43
 
 #define CORE_GDSC						0
 
 #define NPU_CC_CAL_HM0_BCR					0
 #define NPU_CC_CAL_HM1_BCR					1
 #define NPU_CC_CORE_BCR						2
-#define NPU_CC_DSP_BCR						3
+#define NPU_CC_DPM_TEMP_CLK_ARES				3
+#define NPU_CC_DSP_BCR						4
+#define NPU_CC_LLM_CURR_CLK_ARES				5
+#define NPU_CC_LLM_TEMP_CLK_ARES				6
 
 #endif
diff --git a/include/dt-bindings/clock/qcom,rpmh.h b/include/dt-bindings/clock/qcom,rpmh.h
index 0a9bc3c..2b122c1 100644
--- a/include/dt-bindings/clock/qcom,rpmh.h
+++ b/include/dt-bindings/clock/qcom,rpmh.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2018, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. */
 
 #ifndef _DT_BINDINGS_CLK_MSM_RPMH_H
 #define _DT_BINDINGS_CLK_MSM_RPMH_H
@@ -7,15 +7,21 @@
 /* RPMh controlled clocks */
 #define RPMH_CXO_CLK				0
 #define RPMH_CXO_CLK_A				1
-#define RPMH_LN_BB_CLK2				2
-#define RPMH_LN_BB_CLK2_A			3
-#define RPMH_LN_BB_CLK3				4
-#define RPMH_LN_BB_CLK3_A			5
-#define RPMH_RF_CLK1				6
-#define RPMH_RF_CLK1_A				7
-#define RPMH_RF_CLK2				8
-#define RPMH_RF_CLK2_A				9
-#define RPMH_RF_CLK3				10
-#define RPMH_RF_CLK3_A				11
+#define RPMH_LN_BB_CLK1				2
+#define RPMH_LN_BB_CLK1_A			3
+#define RPMH_LN_BB_CLK2				4
+#define RPMH_LN_BB_CLK2_A			5
+#define RPMH_LN_BB_CLK3				6
+#define RPMH_LN_BB_CLK3_A			7
+#define RPMH_RF_CLK1				8
+#define RPMH_RF_CLK1_A				9
+#define RPMH_RF_CLK2				10
+#define RPMH_RF_CLK2_A				11
+#define RPMH_RF_CLK3				12
+#define RPMH_RF_CLK3_A				13
+#define RPMH_RF_CLKD3				14
+#define RPMH_RF_CLKD3_A				15
+#define RPMH_RF_CLKD4				16
+#define RPMH_RF_CLKD4_A				17
 
 #endif
diff --git a/include/linux/Kbuild.vservices b/include/linux/Kbuild.vservices
new file mode 100644
index 0000000..392f559
--- /dev/null
+++ b/include/linux/Kbuild.vservices
@@ -0,0 +1,3 @@
+#
+# Virtual Services headers which need to be exported for user-space
+#
diff --git a/include/linux/pfk.h b/include/linux/pfk.h
new file mode 100644
index 0000000..2849a93
--- /dev/null
+++ b/include/linux/pfk.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef PFK_H_
+#define PFK_H_
+
+#include <linux/bio.h>
+
+struct ice_crypto_setting;
+
+#ifdef CONFIG_PFK
+
+/*
+ * Default key for inline encryption.
+ *
+ * For now only AES-256-XTS is supported, so this is a fixed length.  But if
+ * ever needed, this should be made variable-length with a 'mode' and 'size'.
+ * (Remember to update pfk_allow_merge_bio() when doing so!)
+ */
+#define BLK_ENCRYPTION_KEY_SIZE_AES_256_XTS 64
+
+struct blk_encryption_key {
+	u8 raw[BLK_ENCRYPTION_KEY_SIZE_AES_256_XTS];
+};
+
+int pfk_load_key_start(const struct bio *bio,
+			struct ice_crypto_setting *ice_setting,
+				bool *is_pfe, bool async);
+int pfk_load_key_end(const struct bio *bio, bool *is_pfe);
+int pfk_remove_key(const unsigned char *key, size_t key_size);
+int pfk_fbe_clear_key(const unsigned char *key, size_t key_size,
+		const unsigned char *salt, size_t salt_size);
+bool pfk_allow_merge_bio(const struct bio *bio1, const struct bio *bio2);
+void pfk_clear_on_reset(void);
+
+#else
+static inline int pfk_load_key_start(const struct bio *bio,
+	struct ice_crypto_setting *ice_setting, bool *is_pfe, bool async)
+{
+	return -ENODEV;
+}
+
+static inline int pfk_load_key_end(const struct bio *bio, bool *is_pfe)
+{
+	return -ENODEV;
+}
+
+static inline int pfk_remove_key(const unsigned char *key, size_t key_size)
+{
+	return -ENODEV;
+}
+
+static inline bool pfk_allow_merge_bio(const struct bio *bio1,
+		const struct bio *bio2)
+{
+	return true;
+}
+
+static inline int pfk_fbe_clear_key(const unsigned char *key, size_t key_size,
+			const unsigned char *salt, size_t salt_size)
+{
+	return -ENODEV;
+}
+
+static inline void pfk_clear_on_reset(void)
+{}
+
+#endif /* CONFIG_PFK */
+
+#endif /* PFK_H_ */
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 808fbfe..118ca76 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -323,6 +323,10 @@
 	wait_queue_head_t write_wait;
 	wait_queue_head_t read_wait;
 	struct work_struct hangup_work;
+#if defined(CONFIG_TTY_FLUSH_LOCAL_ECHO)
+	int delayed_work;
+	struct delayed_work echo_delayed_work;
+#endif
 	void *disc_data;
 	void *driver_data;
 	spinlock_t files_lock;		/* protects tty_files list */
diff --git a/include/microvisor/kernel/microvisor.h b/include/microvisor/kernel/microvisor.h
new file mode 100644
index 0000000..1a30d1f
--- /dev/null
+++ b/include/microvisor/kernel/microvisor.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* Auto generated - do not modify */
+
+/** @addtogroup lib_microvisor
+ * @{
+ */
+
+
+#ifndef __AUTO__MICROVISOR_H__
+#define __AUTO__MICROVISOR_H__
+
+/** SDK Major number */
+#define OKL4_SDK_VERSION_MAJOR 5
+/** SDK Minor number */
+#define OKL4_SDK_VERSION_MINOR 3
+/**
+ * If defined, indicates this is an internal development version.
+ * In this case, OKL4_SDK_VERSION_RELEASE == -1
+ */
+#define OKL4_SDK_VERSION_DEVELOPMENT 1
+/** SDK Release (revision) number */
+#define OKL4_SDK_VERSION_RELEASE (-1)
+/** SDK Maintenance number. Indicates the maintenance sequence revision. */
+#define OKL4_SDK_VERSION_MAINTENANCE 0
+
+
+/** @addtogroup lib_microvisor_helpers Microvisor Helpers
+ * @{
+ */
+
+/** Common C and ASM defines. */
+
+/** OKL4 Kernel supports a Virtual CPU (vCPU) interface. */
+#define OKL4_VCPU_SUPPORT
+
+
+/** OKL4 Kernel vCPU API supports SMP guest cells. */
+#define OKL4_VCPU_SMP_SUPPORT
+
+
+/** @} */
+#endif /* __AUTO__MICROVISOR_H__ */
+/** @} */
diff --git a/include/microvisor/kernel/offsets.h b/include/microvisor/kernel/offsets.h
new file mode 100644
index 0000000..9517acf
--- /dev/null
+++ b/include/microvisor/kernel/offsets.h
@@ -0,0 +1,1534 @@
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* Auto generated - do not modify */
+
+#ifndef __AUTO__MICROVISOR_OFFSETS_H__
+#define __AUTO__MICROVISOR_OFFSETS_H__
+
+#if defined(ASSEMBLY)
+/* LWEE structure's type offsets */
+
+/**
+ *   Offsets for struct okl4_atomic_register
+ **/
+/*lint -esym(621, OKL4_STRUCT_ATOMIC_REGISTER_SIZE) */
+#define OKL4_STRUCT_ATOMIC_REGISTER_SIZE (8)
+/*lint -esym(621, OKL4_OFS_ATOMIC_REGISTER_VALUE) */
+#define OKL4_OFS_ATOMIC_REGISTER_VALUE (0)
+/**
+ *   Offsets for struct okl4_atomic_uint16
+ **/
+/*lint -esym(621, OKL4_STRUCT_ATOMIC_UINT16_SIZE) */
+#define OKL4_STRUCT_ATOMIC_UINT16_SIZE (2)
+/*lint -esym(621, OKL4_OFS_ATOMIC_UINT16_VALUE) */
+#define OKL4_OFS_ATOMIC_UINT16_VALUE (0)
+/**
+ *   Offsets for struct okl4_atomic_uint32
+ **/
+/*lint -esym(621, OKL4_STRUCT_ATOMIC_UINT32_SIZE) */
+#define OKL4_STRUCT_ATOMIC_UINT32_SIZE (4)
+/*lint -esym(621, OKL4_OFS_ATOMIC_UINT32_VALUE) */
+#define OKL4_OFS_ATOMIC_UINT32_VALUE (0)
+/**
+ *   Offsets for struct okl4_atomic_uint64
+ **/
+/*lint -esym(621, OKL4_STRUCT_ATOMIC_UINT64_SIZE) */
+#define OKL4_STRUCT_ATOMIC_UINT64_SIZE (8)
+/*lint -esym(621, OKL4_OFS_ATOMIC_UINT64_VALUE) */
+#define OKL4_OFS_ATOMIC_UINT64_VALUE (0)
+/**
+ *   Offsets for struct okl4_atomic_uint8
+ **/
+/*lint -esym(621, OKL4_STRUCT_ATOMIC_UINT8_SIZE) */
+#define OKL4_STRUCT_ATOMIC_UINT8_SIZE (1)
+/*lint -esym(621, OKL4_OFS_ATOMIC_UINT8_VALUE) */
+#define OKL4_OFS_ATOMIC_UINT8_VALUE (0)
+/**
+ *   Offsets for struct okl4_axon_data
+ **/
+/*lint -esym(621, OKL4_STRUCT_AXON_DATA_SIZE) */
+#define OKL4_STRUCT_AXON_DATA_SIZE (12)
+/*lint -esym(621, OKL4_OFS_AXON_DATA_KCAP) */
+#define OKL4_OFS_AXON_DATA_KCAP (0)
+/*lint -esym(621, OKL4_OFS_AXON_DATA_SEGMENT) */
+#define OKL4_OFS_AXON_DATA_SEGMENT (4)
+/*lint -esym(621, OKL4_OFS_AXON_DATA_VIRQ) */
+#define OKL4_OFS_AXON_DATA_VIRQ (8)
+/**
+ *   Offsets for struct okl4_axon_ep_data
+ **/
+/*lint -esym(621, OKL4_STRUCT_AXON_EP_DATA_SIZE) */
+#define OKL4_STRUCT_AXON_EP_DATA_SIZE (24)
+/*lint -esym(621, OKL4_OFS_AXON_EP_DATA_RX) */
+#define OKL4_OFS_AXON_EP_DATA_RX (0)
+/*lint -esym(621, OKL4_OFS_AXON_EP_DATA_RX_KCAP) */
+#define OKL4_OFS_AXON_EP_DATA_RX_KCAP (0)
+/*lint -esym(621, OKL4_OFS_AXON_EP_DATA_RX_SEGMENT) */
+#define OKL4_OFS_AXON_EP_DATA_RX_SEGMENT (4)
+/*lint -esym(621, OKL4_OFS_AXON_EP_DATA_RX_VIRQ) */
+#define OKL4_OFS_AXON_EP_DATA_RX_VIRQ (8)
+/*lint -esym(621, OKL4_OFS_AXON_EP_DATA_TX) */
+#define OKL4_OFS_AXON_EP_DATA_TX (12)
+/*lint -esym(621, OKL4_OFS_AXON_EP_DATA_TX_KCAP) */
+#define OKL4_OFS_AXON_EP_DATA_TX_KCAP (12)
+/*lint -esym(621, OKL4_OFS_AXON_EP_DATA_TX_SEGMENT) */
+#define OKL4_OFS_AXON_EP_DATA_TX_SEGMENT (16)
+/*lint -esym(621, OKL4_OFS_AXON_EP_DATA_TX_VIRQ) */
+#define OKL4_OFS_AXON_EP_DATA_TX_VIRQ (20)
+/**
+ *   Offsets for struct okl4_axon_queue
+ **/
+/*lint -esym(621, OKL4_STRUCT_AXON_QUEUE_SIZE) */
+#define OKL4_STRUCT_AXON_QUEUE_SIZE (12)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE_QUEUE_OFFSET) */
+#define OKL4_OFS_AXON_QUEUE_QUEUE_OFFSET (0)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE_ENTRIES) */
+#define OKL4_OFS_AXON_QUEUE_ENTRIES (4)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE_KPTR) */
+#define OKL4_OFS_AXON_QUEUE_KPTR (6)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE_UPTR) */
+#define OKL4_OFS_AXON_QUEUE_UPTR (8)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE___PADDING0_2) */
+#define OKL4_OFS_AXON_QUEUE___PADDING0_2 (10)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE___PADDING1_3) */
+#define OKL4_OFS_AXON_QUEUE___PADDING1_3 (11)
+/**
+ *   Offsets for struct okl4_axon_queue_entry
+ **/
+/*lint -esym(621, OKL4_STRUCT_AXON_QUEUE_ENTRY_SIZE) */
+#define OKL4_STRUCT_AXON_QUEUE_ENTRY_SIZE (24)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE_ENTRY_INFO) */
+#define OKL4_OFS_AXON_QUEUE_ENTRY_INFO (0)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE_ENTRY_DATA_SIZE) */
+#define OKL4_OFS_AXON_QUEUE_ENTRY_DATA_SIZE (8)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE_ENTRY_RECV_SEQUENCE) */
+#define OKL4_OFS_AXON_QUEUE_ENTRY_RECV_SEQUENCE (16)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE_ENTRY___PADDING0_4) */
+#define OKL4_OFS_AXON_QUEUE_ENTRY___PADDING0_4 (20)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE_ENTRY___PADDING1_5) */
+#define OKL4_OFS_AXON_QUEUE_ENTRY___PADDING1_5 (21)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE_ENTRY___PADDING2_6) */
+#define OKL4_OFS_AXON_QUEUE_ENTRY___PADDING2_6 (22)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE_ENTRY___PADDING3_7) */
+#define OKL4_OFS_AXON_QUEUE_ENTRY___PADDING3_7 (23)
+/**
+ *   Offsets for struct okl4_axon_rx
+ **/
+/*lint -esym(621, OKL4_STRUCT_AXON_RX_SIZE) */
+#define OKL4_STRUCT_AXON_RX_SIZE (56)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES) */
+#define OKL4_OFS_AXON_RX_QUEUES (0)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_0) */
+#define OKL4_OFS_AXON_RX_QUEUES_0 (0)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_0_QUEUE_OFFSET) */
+#define OKL4_OFS_AXON_RX_QUEUES_0_QUEUE_OFFSET (0)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_0_ENTRIES) */
+#define OKL4_OFS_AXON_RX_QUEUES_0_ENTRIES (4)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_0_KPTR) */
+#define OKL4_OFS_AXON_RX_QUEUES_0_KPTR (6)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_0_UPTR) */
+#define OKL4_OFS_AXON_RX_QUEUES_0_UPTR (8)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_0___PADDING0_2) */
+#define OKL4_OFS_AXON_RX_QUEUES_0___PADDING0_2 (10)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_0___PADDING1_3) */
+#define OKL4_OFS_AXON_RX_QUEUES_0___PADDING1_3 (11)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_1) */
+#define OKL4_OFS_AXON_RX_QUEUES_1 (12)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_1_QUEUE_OFFSET) */
+#define OKL4_OFS_AXON_RX_QUEUES_1_QUEUE_OFFSET (12)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_1_ENTRIES) */
+#define OKL4_OFS_AXON_RX_QUEUES_1_ENTRIES (16)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_1_KPTR) */
+#define OKL4_OFS_AXON_RX_QUEUES_1_KPTR (18)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_1_UPTR) */
+#define OKL4_OFS_AXON_RX_QUEUES_1_UPTR (20)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_1___PADDING0_2) */
+#define OKL4_OFS_AXON_RX_QUEUES_1___PADDING0_2 (22)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_1___PADDING1_3) */
+#define OKL4_OFS_AXON_RX_QUEUES_1___PADDING1_3 (23)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_2) */
+#define OKL4_OFS_AXON_RX_QUEUES_2 (24)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_2_QUEUE_OFFSET) */
+#define OKL4_OFS_AXON_RX_QUEUES_2_QUEUE_OFFSET (24)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_2_ENTRIES) */
+#define OKL4_OFS_AXON_RX_QUEUES_2_ENTRIES (28)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_2_KPTR) */
+#define OKL4_OFS_AXON_RX_QUEUES_2_KPTR (30)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_2_UPTR) */
+#define OKL4_OFS_AXON_RX_QUEUES_2_UPTR (32)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_2___PADDING0_2) */
+#define OKL4_OFS_AXON_RX_QUEUES_2___PADDING0_2 (34)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_2___PADDING1_3) */
+#define OKL4_OFS_AXON_RX_QUEUES_2___PADDING1_3 (35)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_3) */
+#define OKL4_OFS_AXON_RX_QUEUES_3 (36)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_3_QUEUE_OFFSET) */
+#define OKL4_OFS_AXON_RX_QUEUES_3_QUEUE_OFFSET (36)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_3_ENTRIES) */
+#define OKL4_OFS_AXON_RX_QUEUES_3_ENTRIES (40)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_3_KPTR) */
+#define OKL4_OFS_AXON_RX_QUEUES_3_KPTR (42)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_3_UPTR) */
+#define OKL4_OFS_AXON_RX_QUEUES_3_UPTR (44)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_3___PADDING0_2) */
+#define OKL4_OFS_AXON_RX_QUEUES_3___PADDING0_2 (46)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_3___PADDING1_3) */
+#define OKL4_OFS_AXON_RX_QUEUES_3___PADDING1_3 (47)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUE_SIZES) */
+#define OKL4_OFS_AXON_RX_QUEUE_SIZES (48)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUE_SIZES_0) */
+#define OKL4_OFS_AXON_RX_QUEUE_SIZES_0 (48)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUE_SIZES_1) */
+#define OKL4_OFS_AXON_RX_QUEUE_SIZES_1 (50)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUE_SIZES_2) */
+#define OKL4_OFS_AXON_RX_QUEUE_SIZES_2 (52)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUE_SIZES_3) */
+#define OKL4_OFS_AXON_RX_QUEUE_SIZES_3 (54)
+/**
+ *   Offsets for struct okl4_axon_tx
+ **/
+/*lint -esym(621, OKL4_STRUCT_AXON_TX_SIZE) */
+#define OKL4_STRUCT_AXON_TX_SIZE (48)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES) */
+#define OKL4_OFS_AXON_TX_QUEUES (0)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_0) */
+#define OKL4_OFS_AXON_TX_QUEUES_0 (0)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_0_QUEUE_OFFSET) */
+#define OKL4_OFS_AXON_TX_QUEUES_0_QUEUE_OFFSET (0)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_0_ENTRIES) */
+#define OKL4_OFS_AXON_TX_QUEUES_0_ENTRIES (4)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_0_KPTR) */
+#define OKL4_OFS_AXON_TX_QUEUES_0_KPTR (6)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_0_UPTR) */
+#define OKL4_OFS_AXON_TX_QUEUES_0_UPTR (8)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_0___PADDING0_2) */
+#define OKL4_OFS_AXON_TX_QUEUES_0___PADDING0_2 (10)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_0___PADDING1_3) */
+#define OKL4_OFS_AXON_TX_QUEUES_0___PADDING1_3 (11)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_1) */
+#define OKL4_OFS_AXON_TX_QUEUES_1 (12)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_1_QUEUE_OFFSET) */
+#define OKL4_OFS_AXON_TX_QUEUES_1_QUEUE_OFFSET (12)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_1_ENTRIES) */
+#define OKL4_OFS_AXON_TX_QUEUES_1_ENTRIES (16)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_1_KPTR) */
+#define OKL4_OFS_AXON_TX_QUEUES_1_KPTR (18)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_1_UPTR) */
+#define OKL4_OFS_AXON_TX_QUEUES_1_UPTR (20)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_1___PADDING0_2) */
+#define OKL4_OFS_AXON_TX_QUEUES_1___PADDING0_2 (22)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_1___PADDING1_3) */
+#define OKL4_OFS_AXON_TX_QUEUES_1___PADDING1_3 (23)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_2) */
+#define OKL4_OFS_AXON_TX_QUEUES_2 (24)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_2_QUEUE_OFFSET) */
+#define OKL4_OFS_AXON_TX_QUEUES_2_QUEUE_OFFSET (24)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_2_ENTRIES) */
+#define OKL4_OFS_AXON_TX_QUEUES_2_ENTRIES (28)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_2_KPTR) */
+#define OKL4_OFS_AXON_TX_QUEUES_2_KPTR (30)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_2_UPTR) */
+#define OKL4_OFS_AXON_TX_QUEUES_2_UPTR (32)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_2___PADDING0_2) */
+#define OKL4_OFS_AXON_TX_QUEUES_2___PADDING0_2 (34)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_2___PADDING1_3) */
+#define OKL4_OFS_AXON_TX_QUEUES_2___PADDING1_3 (35)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_3) */
+#define OKL4_OFS_AXON_TX_QUEUES_3 (36)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_3_QUEUE_OFFSET) */
+#define OKL4_OFS_AXON_TX_QUEUES_3_QUEUE_OFFSET (36)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_3_ENTRIES) */
+#define OKL4_OFS_AXON_TX_QUEUES_3_ENTRIES (40)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_3_KPTR) */
+#define OKL4_OFS_AXON_TX_QUEUES_3_KPTR (42)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_3_UPTR) */
+#define OKL4_OFS_AXON_TX_QUEUES_3_UPTR (44)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_3___PADDING0_2) */
+#define OKL4_OFS_AXON_TX_QUEUES_3___PADDING0_2 (46)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_3___PADDING1_3) */
+#define OKL4_OFS_AXON_TX_QUEUES_3___PADDING1_3 (47)
+/**
+ *   Offsets for struct okl4_range_item
+ **/
+/*lint -esym(621, OKL4_STRUCT_RANGE_ITEM_SIZE) */
+#define OKL4_STRUCT_RANGE_ITEM_SIZE (16)
+/*lint -esym(621, OKL4_OFS_RANGE_ITEM_BASE) */
+#define OKL4_OFS_RANGE_ITEM_BASE (0)
+/*lint -esym(621, OKL4_OFS_RANGE_ITEM_SIZE) */
+#define OKL4_OFS_RANGE_ITEM_SIZE (8)
+/**
+ *   Offsets for struct okl4_virtmem_item
+ **/
+/*lint -esym(621, OKL4_STRUCT_VIRTMEM_ITEM_SIZE) */
+#define OKL4_STRUCT_VIRTMEM_ITEM_SIZE (16)
+/*lint -esym(621, OKL4_OFS_VIRTMEM_ITEM_RANGE) */
+#define OKL4_OFS_VIRTMEM_ITEM_RANGE (0)
+/*lint -esym(621, OKL4_OFS_VIRTMEM_ITEM_RANGE_BASE) */
+#define OKL4_OFS_VIRTMEM_ITEM_RANGE_BASE (0)
+/*lint -esym(621, OKL4_OFS_VIRTMEM_ITEM_RANGE_SIZE) */
+#define OKL4_OFS_VIRTMEM_ITEM_RANGE_SIZE (8)
+/**
+ *   Offsets for struct okl4_cell_management_item
+ **/
+/*lint -esym(621, OKL4_STRUCT_CELL_MANAGEMENT_ITEM_SIZE) */
+#define OKL4_STRUCT_CELL_MANAGEMENT_ITEM_SIZE (104)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_ENTRY) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_ENTRY (0)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_MAPPING_RANGE) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_MAPPING_RANGE (8)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_MAPPING_RANGE_RANGE) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_MAPPING_RANGE_RANGE (8)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_MAPPING_RANGE_RANGE_BASE) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_MAPPING_RANGE_RANGE_BASE (8)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_MAPPING_RANGE_RANGE_SIZE) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_MAPPING_RANGE_RANGE_SIZE (16)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_DATA) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_DATA (24)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_IMAGE) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_IMAGE (32)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_MMU) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_MMU (40)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING0_4) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING0_4 (44)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING1_5) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING1_5 (45)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING2_6) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING2_6 (46)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING3_7) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING3_7 (47)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_NAME) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_NAME (48)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_REGISTERS_CAP) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_REGISTERS_CAP (56)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_RESET_VIRQ) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_RESET_VIRQ (60)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_SEGMENT_INDEX) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_SEGMENT_INDEX (64)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING4_4) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING4_4 (68)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING5_5) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING5_5 (69)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING6_6) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING6_6 (70)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING7_7) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING7_7 (71)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_SEGMENTS) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_SEGMENTS (72)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_VCPUS) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_VCPUS (80)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_BOOT_ONCE) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_BOOT_ONCE (88)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_CAN_STOP) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_CAN_STOP (89)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_DEFERRED) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_DEFERRED (90)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_DETACHED) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_DETACHED (91)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_ERASE) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_ERASE (92)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING8_5) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING8_5 (93)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING9_6) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING9_6 (94)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING10_7) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING10_7 (95)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_DTB_ADDRESS) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_DTB_ADDRESS (96)
+/**
+ *   Offsets for struct okl4_cell_management
+ **/
+/*lint -esym(621, OKL4_STRUCT_CELL_MANAGEMENT_SIZE) */
+#define OKL4_STRUCT_CELL_MANAGEMENT_SIZE (8)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_NUM_ITEMS) */
+#define OKL4_OFS_CELL_MANAGEMENT_NUM_ITEMS (0)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT___PADDING0_4) */
+#define OKL4_OFS_CELL_MANAGEMENT___PADDING0_4 (4)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT___PADDING1_5) */
+#define OKL4_OFS_CELL_MANAGEMENT___PADDING1_5 (5)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT___PADDING2_6) */
+#define OKL4_OFS_CELL_MANAGEMENT___PADDING2_6 (6)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT___PADDING3_7) */
+#define OKL4_OFS_CELL_MANAGEMENT___PADDING3_7 (7)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEMS) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEMS (8)
+/**
+ *   Offsets for struct okl4_segment_mapping
+ **/
+/*lint -esym(621, OKL4_STRUCT_SEGMENT_MAPPING_SIZE) */
+#define OKL4_STRUCT_SEGMENT_MAPPING_SIZE (32)
+/*lint -esym(621, OKL4_OFS_SEGMENT_MAPPING_PHYS_ADDR) */
+#define OKL4_OFS_SEGMENT_MAPPING_PHYS_ADDR (0)
+/*lint -esym(621, OKL4_OFS_SEGMENT_MAPPING_SIZE) */
+#define OKL4_OFS_SEGMENT_MAPPING_SIZE (8)
+/*lint -esym(621, OKL4_OFS_SEGMENT_MAPPING_VIRT_ADDR) */
+#define OKL4_OFS_SEGMENT_MAPPING_VIRT_ADDR (16)
+/*lint -esym(621, OKL4_OFS_SEGMENT_MAPPING_CAP) */
+#define OKL4_OFS_SEGMENT_MAPPING_CAP (24)
+/*lint -esym(621, OKL4_OFS_SEGMENT_MAPPING_DEVICE) */
+#define OKL4_OFS_SEGMENT_MAPPING_DEVICE (28)
+/*lint -esym(621, OKL4_OFS_SEGMENT_MAPPING_OWNED) */
+#define OKL4_OFS_SEGMENT_MAPPING_OWNED (29)
+/*lint -esym(621, OKL4_OFS_SEGMENT_MAPPING___PADDING0_6) */
+#define OKL4_OFS_SEGMENT_MAPPING___PADDING0_6 (30)
+/*lint -esym(621, OKL4_OFS_SEGMENT_MAPPING___PADDING1_7) */
+#define OKL4_OFS_SEGMENT_MAPPING___PADDING1_7 (31)
+/**
+ *   Offsets for struct okl4_cell_management_segments
+ **/
+/*lint -esym(621, OKL4_STRUCT_CELL_MANAGEMENT_SEGMENTS_SIZE) */
+#define OKL4_STRUCT_CELL_MANAGEMENT_SEGMENTS_SIZE (8)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_SEGMENTS_FREE_SEGMENTS) */
+#define OKL4_OFS_CELL_MANAGEMENT_SEGMENTS_FREE_SEGMENTS (0)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_SEGMENTS_NUM_SEGMENTS) */
+#define OKL4_OFS_CELL_MANAGEMENT_SEGMENTS_NUM_SEGMENTS (4)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_SEGMENTS_SEGMENT_MAPPINGS) */
+#define OKL4_OFS_CELL_MANAGEMENT_SEGMENTS_SEGMENT_MAPPINGS (8)
+/**
+ *   Offsets for struct okl4_cell_management_vcpus
+ **/
+/*lint -esym(621, OKL4_STRUCT_CELL_MANAGEMENT_VCPUS_SIZE) */
+#define OKL4_STRUCT_CELL_MANAGEMENT_VCPUS_SIZE (4)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_VCPUS_NUM_VCPUS) */
+#define OKL4_OFS_CELL_MANAGEMENT_VCPUS_NUM_VCPUS (0)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_VCPUS_VCPU_CAPS) */
+#define OKL4_OFS_CELL_MANAGEMENT_VCPUS_VCPU_CAPS (4)
+/**
+ *   Offsets for struct _okl4_env_hdr
+ **/
+/*lint -esym(621, _OKL4_STRUCT_ENV_HDR_SIZE) */
+#define _OKL4_STRUCT_ENV_HDR_SIZE (4)
+/*lint -esym(621, _OKL4_OFS_ENV_HDR_MAGIC) */
+#define _OKL4_OFS_ENV_HDR_MAGIC (0)
+/*lint -esym(621, _OKL4_OFS_ENV_HDR_COUNT) */
+#define _OKL4_OFS_ENV_HDR_COUNT (2)
+/**
+ *   Offsets for struct _okl4_env_item
+ **/
+/*lint -esym(621, _OKL4_STRUCT_ENV_ITEM_SIZE) */
+#define _OKL4_STRUCT_ENV_ITEM_SIZE (16)
+/*lint -esym(621, _OKL4_OFS_ENV_ITEM_NAME) */
+#define _OKL4_OFS_ENV_ITEM_NAME (0)
+/*lint -esym(621, _OKL4_OFS_ENV_ITEM_ITEM) */
+#define _OKL4_OFS_ENV_ITEM_ITEM (8)
+/**
+ *   Offsets for struct _okl4_env
+ **/
+/*lint -esym(621, _OKL4_STRUCT_ENV_SIZE) */
+#define _OKL4_STRUCT_ENV_SIZE (8)
+/*lint -esym(621, _OKL4_OFS_ENV_ENV_HDR) */
+#define _OKL4_OFS_ENV_ENV_HDR (0)
+/*lint -esym(621, _OKL4_OFS_ENV_ENV_HDR_MAGIC) */
+#define _OKL4_OFS_ENV_ENV_HDR_MAGIC (0)
+/*lint -esym(621, _OKL4_OFS_ENV_ENV_HDR_COUNT) */
+#define _OKL4_OFS_ENV_ENV_HDR_COUNT (2)
+/*lint -esym(621, _OKL4_OFS_ENV___PADDING0_4) */
+#define _OKL4_OFS_ENV___PADDING0_4 (4)
+/*lint -esym(621, _OKL4_OFS_ENV___PADDING1_5) */
+#define _OKL4_OFS_ENV___PADDING1_5 (5)
+/*lint -esym(621, _OKL4_OFS_ENV___PADDING2_6) */
+#define _OKL4_OFS_ENV___PADDING2_6 (6)
+/*lint -esym(621, _OKL4_OFS_ENV___PADDING3_7) */
+#define _OKL4_OFS_ENV___PADDING3_7 (7)
+/*lint -esym(621, _OKL4_OFS_ENV_ENV_ITEM) */
+#define _OKL4_OFS_ENV_ENV_ITEM (8)
+/**
+ *   Offsets for struct okl4_env_access_cell
+ **/
+/*lint -esym(621, OKL4_STRUCT_ENV_ACCESS_CELL_SIZE) */
+#define OKL4_STRUCT_ENV_ACCESS_CELL_SIZE (16)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_CELL_NAME) */
+#define OKL4_OFS_ENV_ACCESS_CELL_NAME (0)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_CELL_NUM_ENTRIES) */
+#define OKL4_OFS_ENV_ACCESS_CELL_NUM_ENTRIES (8)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_CELL_START_ENTRY) */
+#define OKL4_OFS_ENV_ACCESS_CELL_START_ENTRY (12)
+/**
+ *   Offsets for struct okl4_env_access_entry
+ **/
+/*lint -esym(621, OKL4_STRUCT_ENV_ACCESS_ENTRY_SIZE) */
+#define OKL4_STRUCT_ENV_ACCESS_ENTRY_SIZE (48)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_ENTRY_VIRTUAL_ADDRESS) */
+#define OKL4_OFS_ENV_ACCESS_ENTRY_VIRTUAL_ADDRESS (0)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_ENTRY_OFFSET) */
+#define OKL4_OFS_ENV_ACCESS_ENTRY_OFFSET (8)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_ENTRY_SIZE) */
+#define OKL4_OFS_ENV_ACCESS_ENTRY_SIZE (16)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_ENTRY_NUM_SEGS) */
+#define OKL4_OFS_ENV_ACCESS_ENTRY_NUM_SEGS (24)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_ENTRY_SEGMENT_INDEX) */
+#define OKL4_OFS_ENV_ACCESS_ENTRY_SEGMENT_INDEX (28)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_ENTRY_CACHE_ATTRS) */
+#define OKL4_OFS_ENV_ACCESS_ENTRY_CACHE_ATTRS (32)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_ENTRY_PERMISSIONS) */
+#define OKL4_OFS_ENV_ACCESS_ENTRY_PERMISSIONS (36)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_ENTRY_OBJECT_NAME) */
+#define OKL4_OFS_ENV_ACCESS_ENTRY_OBJECT_NAME (40)
+/**
+ *   Offsets for struct okl4_env_access_table
+ **/
+/*lint -esym(621, OKL4_STRUCT_ENV_ACCESS_TABLE_SIZE) */
+#define OKL4_STRUCT_ENV_ACCESS_TABLE_SIZE (24)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_TABLE_NUM_CELLS) */
+#define OKL4_OFS_ENV_ACCESS_TABLE_NUM_CELLS (0)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_TABLE___PADDING0_4) */
+#define OKL4_OFS_ENV_ACCESS_TABLE___PADDING0_4 (4)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_TABLE___PADDING1_5) */
+#define OKL4_OFS_ENV_ACCESS_TABLE___PADDING1_5 (5)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_TABLE___PADDING2_6) */
+#define OKL4_OFS_ENV_ACCESS_TABLE___PADDING2_6 (6)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_TABLE___PADDING3_7) */
+#define OKL4_OFS_ENV_ACCESS_TABLE___PADDING3_7 (7)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_TABLE_CELLS) */
+#define OKL4_OFS_ENV_ACCESS_TABLE_CELLS (8)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_TABLE_ENTRIES) */
+#define OKL4_OFS_ENV_ACCESS_TABLE_ENTRIES (16)
+/**
+ *   Offsets for struct okl4_env_args
+ **/
+/*lint -esym(621, OKL4_STRUCT_ENV_ARGS_SIZE) */
+#define OKL4_STRUCT_ENV_ARGS_SIZE (8)
+/*lint -esym(621, OKL4_OFS_ENV_ARGS_ARGC) */
+#define OKL4_OFS_ENV_ARGS_ARGC (0)
+/*lint -esym(621, OKL4_OFS_ENV_ARGS___PADDING0_4) */
+#define OKL4_OFS_ENV_ARGS___PADDING0_4 (4)
+/*lint -esym(621, OKL4_OFS_ENV_ARGS___PADDING1_5) */
+#define OKL4_OFS_ENV_ARGS___PADDING1_5 (5)
+/*lint -esym(621, OKL4_OFS_ENV_ARGS___PADDING2_6) */
+#define OKL4_OFS_ENV_ARGS___PADDING2_6 (6)
+/*lint -esym(621, OKL4_OFS_ENV_ARGS___PADDING3_7) */
+#define OKL4_OFS_ENV_ARGS___PADDING3_7 (7)
+/*lint -esym(621, OKL4_OFS_ENV_ARGS_ARGV) */
+#define OKL4_OFS_ENV_ARGS_ARGV (8)
+/**
+ *   Offsets for struct okl4_env_interrupt_device_map
+ **/
+/*lint -esym(621, OKL4_STRUCT_ENV_INTERRUPT_DEVICE_MAP_SIZE) */
+#define OKL4_STRUCT_ENV_INTERRUPT_DEVICE_MAP_SIZE (4)
+/*lint -esym(621, OKL4_OFS_ENV_INTERRUPT_DEVICE_MAP_NUM_ENTRIES) */
+#define OKL4_OFS_ENV_INTERRUPT_DEVICE_MAP_NUM_ENTRIES (0)
+/*lint -esym(621, OKL4_OFS_ENV_INTERRUPT_DEVICE_MAP_ENTRIES) */
+#define OKL4_OFS_ENV_INTERRUPT_DEVICE_MAP_ENTRIES (4)
+/**
+ *   Offsets for struct okl4_interrupt
+ **/
+/*lint -esym(621, OKL4_STRUCT_INTERRUPT_SIZE) */
+#define OKL4_STRUCT_INTERRUPT_SIZE (4)
+/*lint -esym(621, OKL4_OFS_INTERRUPT_KCAP) */
+#define OKL4_OFS_INTERRUPT_KCAP (0)
+/**
+ *   Offsets for struct okl4_env_interrupt_handle
+ **/
+/*lint -esym(621, OKL4_STRUCT_ENV_INTERRUPT_HANDLE_SIZE) */
+#define OKL4_STRUCT_ENV_INTERRUPT_HANDLE_SIZE (8)
+/*lint -esym(621, OKL4_OFS_ENV_INTERRUPT_HANDLE_DESCRIPTOR) */
+#define OKL4_OFS_ENV_INTERRUPT_HANDLE_DESCRIPTOR (0)
+/*lint -esym(621, OKL4_OFS_ENV_INTERRUPT_HANDLE_INTERRUPT) */
+#define OKL4_OFS_ENV_INTERRUPT_HANDLE_INTERRUPT (4)
+/*lint -esym(621, OKL4_OFS_ENV_INTERRUPT_HANDLE_INTERRUPT_KCAP) */
+#define OKL4_OFS_ENV_INTERRUPT_HANDLE_INTERRUPT_KCAP (4)
+/**
+ *   Offsets for struct okl4_env_interrupt_list
+ **/
+/*lint -esym(621, OKL4_STRUCT_ENV_INTERRUPT_LIST_SIZE) */
+#define OKL4_STRUCT_ENV_INTERRUPT_LIST_SIZE (24)
+/*lint -esym(621, OKL4_OFS_ENV_INTERRUPT_LIST_NUM_ENTRIES) */
+#define OKL4_OFS_ENV_INTERRUPT_LIST_NUM_ENTRIES (0)
+/*lint -esym(621, OKL4_OFS_ENV_INTERRUPT_LIST___PADDING0_4) */
+#define OKL4_OFS_ENV_INTERRUPT_LIST___PADDING0_4 (4)
+/*lint -esym(621, OKL4_OFS_ENV_INTERRUPT_LIST___PADDING1_5) */
+#define OKL4_OFS_ENV_INTERRUPT_LIST___PADDING1_5 (5)
+/*lint -esym(621, OKL4_OFS_ENV_INTERRUPT_LIST___PADDING2_6) */
+#define OKL4_OFS_ENV_INTERRUPT_LIST___PADDING2_6 (6)
+/*lint -esym(621, OKL4_OFS_ENV_INTERRUPT_LIST___PADDING3_7) */
+#define OKL4_OFS_ENV_INTERRUPT_LIST___PADDING3_7 (7)
+/*lint -esym(621, OKL4_OFS_ENV_INTERRUPT_LIST_DESCRIPTOR) */
+#define OKL4_OFS_ENV_INTERRUPT_LIST_DESCRIPTOR (8)
+/*lint -esym(621, OKL4_OFS_ENV_INTERRUPT_LIST_INTERRUPT) */
+#define OKL4_OFS_ENV_INTERRUPT_LIST_INTERRUPT (16)
+/**
+ *   Offsets for struct okl4_env_profile_cell
+ **/
+/*lint -esym(621, OKL4_STRUCT_ENV_PROFILE_CELL_SIZE) */
+#define OKL4_STRUCT_ENV_PROFILE_CELL_SIZE (48)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME (0)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_0) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_0 (0)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_1) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_1 (1)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_2) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_2 (2)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_3) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_3 (3)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_4) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_4 (4)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_5) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_5 (5)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_6) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_6 (6)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_7) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_7 (7)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_8) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_8 (8)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_9) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_9 (9)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_10) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_10 (10)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_11) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_11 (11)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_12) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_12 (12)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_13) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_13 (13)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_14) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_14 (14)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_15) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_15 (15)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_16) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_16 (16)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_17) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_17 (17)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_18) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_18 (18)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_19) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_19 (19)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_20) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_20 (20)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_21) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_21 (21)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_22) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_22 (22)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_23) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_23 (23)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_24) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_24 (24)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_25) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_25 (25)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_26) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_26 (26)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_27) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_27 (27)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_28) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_28 (28)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_29) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_29 (29)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_30) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_30 (30)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_31) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_31 (31)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NUM_CORES) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NUM_CORES (32)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL___PADDING0_4) */
+#define OKL4_OFS_ENV_PROFILE_CELL___PADDING0_4 (36)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL___PADDING1_5) */
+#define OKL4_OFS_ENV_PROFILE_CELL___PADDING1_5 (37)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL___PADDING2_6) */
+#define OKL4_OFS_ENV_PROFILE_CELL___PADDING2_6 (38)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL___PADDING3_7) */
+#define OKL4_OFS_ENV_PROFILE_CELL___PADDING3_7 (39)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_CORE) */
+#define OKL4_OFS_ENV_PROFILE_CELL_CORE (40)
+/**
+ *   Offsets for struct okl4_env_profile_cpu
+ **/
+/*lint -esym(621, OKL4_STRUCT_ENV_PROFILE_CPU_SIZE) */
+#define OKL4_STRUCT_ENV_PROFILE_CPU_SIZE (4)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CPU_CAP) */
+#define OKL4_OFS_ENV_PROFILE_CPU_CAP (0)
+/**
+ *   Offsets for struct okl4_env_profile_table
+ **/
+/*lint -esym(621, OKL4_STRUCT_ENV_PROFILE_TABLE_SIZE) */
+#define OKL4_STRUCT_ENV_PROFILE_TABLE_SIZE (16)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_TABLE_NUM_CELL_ENTRIES) */
+#define OKL4_OFS_ENV_PROFILE_TABLE_NUM_CELL_ENTRIES (0)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_TABLE_PCPU_CELL_ENTRY) */
+#define OKL4_OFS_ENV_PROFILE_TABLE_PCPU_CELL_ENTRY (4)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_TABLE_CELLS) */
+#define OKL4_OFS_ENV_PROFILE_TABLE_CELLS (8)
+/**
+ *   Offsets for struct okl4_env_segment
+ **/
+/*lint -esym(621, OKL4_STRUCT_ENV_SEGMENT_SIZE) */
+#define OKL4_STRUCT_ENV_SEGMENT_SIZE (24)
+/*lint -esym(621, OKL4_OFS_ENV_SEGMENT_BASE) */
+#define OKL4_OFS_ENV_SEGMENT_BASE (0)
+/*lint -esym(621, OKL4_OFS_ENV_SEGMENT_SIZE) */
+#define OKL4_OFS_ENV_SEGMENT_SIZE (8)
+/*lint -esym(621, OKL4_OFS_ENV_SEGMENT_CAP_ID) */
+#define OKL4_OFS_ENV_SEGMENT_CAP_ID (16)
+/*lint -esym(621, OKL4_OFS_ENV_SEGMENT_RWX) */
+#define OKL4_OFS_ENV_SEGMENT_RWX (20)
+/**
+ *   Offsets for struct okl4_env_segment_table
+ **/
+/*lint -esym(621, OKL4_STRUCT_ENV_SEGMENT_TABLE_SIZE) */
+#define OKL4_STRUCT_ENV_SEGMENT_TABLE_SIZE (8)
+/*lint -esym(621, OKL4_OFS_ENV_SEGMENT_TABLE_NUM_SEGMENTS) */
+#define OKL4_OFS_ENV_SEGMENT_TABLE_NUM_SEGMENTS (0)
+/*lint -esym(621, OKL4_OFS_ENV_SEGMENT_TABLE___PADDING0_4) */
+#define OKL4_OFS_ENV_SEGMENT_TABLE___PADDING0_4 (4)
+/*lint -esym(621, OKL4_OFS_ENV_SEGMENT_TABLE___PADDING1_5) */
+#define OKL4_OFS_ENV_SEGMENT_TABLE___PADDING1_5 (5)
+/*lint -esym(621, OKL4_OFS_ENV_SEGMENT_TABLE___PADDING2_6) */
+#define OKL4_OFS_ENV_SEGMENT_TABLE___PADDING2_6 (6)
+/*lint -esym(621, OKL4_OFS_ENV_SEGMENT_TABLE___PADDING3_7) */
+#define OKL4_OFS_ENV_SEGMENT_TABLE___PADDING3_7 (7)
+/*lint -esym(621, OKL4_OFS_ENV_SEGMENT_TABLE_SEGMENTS) */
+#define OKL4_OFS_ENV_SEGMENT_TABLE_SEGMENTS (8)
+/**
+ *   Offsets for struct okl4_firmware_segment
+ **/
+/*lint -esym(621, OKL4_STRUCT_FIRMWARE_SEGMENT_SIZE) */
+#define OKL4_STRUCT_FIRMWARE_SEGMENT_SIZE (32)
+/*lint -esym(621, OKL4_OFS_FIRMWARE_SEGMENT_COPY_ADDR) */
+#define OKL4_OFS_FIRMWARE_SEGMENT_COPY_ADDR (0)
+/*lint -esym(621, OKL4_OFS_FIRMWARE_SEGMENT_EXEC_ADDR) */
+#define OKL4_OFS_FIRMWARE_SEGMENT_EXEC_ADDR (8)
+/*lint -esym(621, OKL4_OFS_FIRMWARE_SEGMENT_FILESZ) */
+#define OKL4_OFS_FIRMWARE_SEGMENT_FILESZ (16)
+/*lint -esym(621, OKL4_OFS_FIRMWARE_SEGMENT_MEMSZ_DIFF) */
+#define OKL4_OFS_FIRMWARE_SEGMENT_MEMSZ_DIFF (24)
+/**
+ *   Offsets for struct okl4_firmware_segments_info
+ **/
+/*lint -esym(621, OKL4_STRUCT_FIRMWARE_SEGMENTS_INFO_SIZE) */
+#define OKL4_STRUCT_FIRMWARE_SEGMENTS_INFO_SIZE (8)
+/*lint -esym(621, OKL4_OFS_FIRMWARE_SEGMENTS_INFO_NUM_SEGMENTS) */
+#define OKL4_OFS_FIRMWARE_SEGMENTS_INFO_NUM_SEGMENTS (0)
+/*lint -esym(621, OKL4_OFS_FIRMWARE_SEGMENTS_INFO___PADDING0_4) */
+#define OKL4_OFS_FIRMWARE_SEGMENTS_INFO___PADDING0_4 (4)
+/*lint -esym(621, OKL4_OFS_FIRMWARE_SEGMENTS_INFO___PADDING1_5) */
+#define OKL4_OFS_FIRMWARE_SEGMENTS_INFO___PADDING1_5 (5)
+/*lint -esym(621, OKL4_OFS_FIRMWARE_SEGMENTS_INFO___PADDING2_6) */
+#define OKL4_OFS_FIRMWARE_SEGMENTS_INFO___PADDING2_6 (6)
+/*lint -esym(621, OKL4_OFS_FIRMWARE_SEGMENTS_INFO___PADDING3_7) */
+#define OKL4_OFS_FIRMWARE_SEGMENTS_INFO___PADDING3_7 (7)
+/*lint -esym(621, OKL4_OFS_FIRMWARE_SEGMENTS_INFO_SEGMENTS) */
+#define OKL4_OFS_FIRMWARE_SEGMENTS_INFO_SEGMENTS (8)
+/**
+ *   Offsets for struct okl4_kmmu
+ **/
+/*lint -esym(621, OKL4_STRUCT_KMMU_SIZE) */
+#define OKL4_STRUCT_KMMU_SIZE (4)
+/*lint -esym(621, OKL4_OFS_KMMU_KCAP) */
+#define OKL4_OFS_KMMU_KCAP (0)
+/**
+ *   Offsets for struct okl4_ksp_user_agent
+ **/
+/*lint -esym(621, OKL4_STRUCT_KSP_USER_AGENT_SIZE) */
+#define OKL4_STRUCT_KSP_USER_AGENT_SIZE (8)
+/*lint -esym(621, OKL4_OFS_KSP_USER_AGENT_KCAP) */
+#define OKL4_OFS_KSP_USER_AGENT_KCAP (0)
+/*lint -esym(621, OKL4_OFS_KSP_USER_AGENT_VIRQ) */
+#define OKL4_OFS_KSP_USER_AGENT_VIRQ (4)
+/**
+ *   Offsets for struct okl4_pipe_data
+ **/
+/*lint -esym(621, OKL4_STRUCT_PIPE_DATA_SIZE) */
+#define OKL4_STRUCT_PIPE_DATA_SIZE (8)
+/*lint -esym(621, OKL4_OFS_PIPE_DATA_KCAP) */
+#define OKL4_OFS_PIPE_DATA_KCAP (0)
+/*lint -esym(621, OKL4_OFS_PIPE_DATA_VIRQ) */
+#define OKL4_OFS_PIPE_DATA_VIRQ (4)
+/**
+ *   Offsets for struct okl4_pipe_ep_data
+ **/
+/*lint -esym(621, OKL4_STRUCT_PIPE_EP_DATA_SIZE) */
+#define OKL4_STRUCT_PIPE_EP_DATA_SIZE (16)
+/*lint -esym(621, OKL4_OFS_PIPE_EP_DATA_RX) */
+#define OKL4_OFS_PIPE_EP_DATA_RX (0)
+/*lint -esym(621, OKL4_OFS_PIPE_EP_DATA_RX_KCAP) */
+#define OKL4_OFS_PIPE_EP_DATA_RX_KCAP (0)
+/*lint -esym(621, OKL4_OFS_PIPE_EP_DATA_RX_VIRQ) */
+#define OKL4_OFS_PIPE_EP_DATA_RX_VIRQ (4)
+/*lint -esym(621, OKL4_OFS_PIPE_EP_DATA_TX) */
+#define OKL4_OFS_PIPE_EP_DATA_TX (8)
+/*lint -esym(621, OKL4_OFS_PIPE_EP_DATA_TX_KCAP) */
+#define OKL4_OFS_PIPE_EP_DATA_TX_KCAP (8)
+/*lint -esym(621, OKL4_OFS_PIPE_EP_DATA_TX_VIRQ) */
+#define OKL4_OFS_PIPE_EP_DATA_TX_VIRQ (12)
+/**
+ *   Offsets for struct okl4_link
+ **/
+/*lint -esym(621, OKL4_STRUCT_LINK_SIZE) */
+#define OKL4_STRUCT_LINK_SIZE (80)
+/*lint -esym(621, OKL4_OFS_LINK_NAME) */
+#define OKL4_OFS_LINK_NAME (0)
+/*lint -esym(621, OKL4_OFS_LINK_OPAQUE) */
+#define OKL4_OFS_LINK_OPAQUE (8)
+/*lint -esym(621, OKL4_OFS_LINK_PARTNER_NAME) */
+#define OKL4_OFS_LINK_PARTNER_NAME (16)
+/*lint -esym(621, OKL4_OFS_LINK_ROLE) */
+#define OKL4_OFS_LINK_ROLE (24)
+/*lint -esym(621, OKL4_OFS_LINK___PADDING0_4) */
+#define OKL4_OFS_LINK___PADDING0_4 (28)
+/*lint -esym(621, OKL4_OFS_LINK___PADDING1_5) */
+#define OKL4_OFS_LINK___PADDING1_5 (29)
+/*lint -esym(621, OKL4_OFS_LINK___PADDING2_6) */
+#define OKL4_OFS_LINK___PADDING2_6 (30)
+/*lint -esym(621, OKL4_OFS_LINK___PADDING3_7) */
+#define OKL4_OFS_LINK___PADDING3_7 (31)
+/*lint -esym(621, OKL4_OFS_LINK_TRANSPORT) */
+#define OKL4_OFS_LINK_TRANSPORT (32)
+/*lint -esym(621, OKL4_OFS_LINK_TRANSPORT_TYPE) */
+#define OKL4_OFS_LINK_TRANSPORT_TYPE (72)
+/*lint -esym(621, OKL4_OFS_LINK___PADDING4_4) */
+#define OKL4_OFS_LINK___PADDING4_4 (76)
+/*lint -esym(621, OKL4_OFS_LINK___PADDING5_5) */
+#define OKL4_OFS_LINK___PADDING5_5 (77)
+/*lint -esym(621, OKL4_OFS_LINK___PADDING6_6) */
+#define OKL4_OFS_LINK___PADDING6_6 (78)
+/*lint -esym(621, OKL4_OFS_LINK___PADDING7_7) */
+#define OKL4_OFS_LINK___PADDING7_7 (79)
+/**
+ *   Offsets for struct okl4_links
+ **/
+/*lint -esym(621, OKL4_STRUCT_LINKS_SIZE) */
+#define OKL4_STRUCT_LINKS_SIZE (8)
+/*lint -esym(621, OKL4_OFS_LINKS_NUM_LINKS) */
+#define OKL4_OFS_LINKS_NUM_LINKS (0)
+/*lint -esym(621, OKL4_OFS_LINKS___PADDING0_4) */
+#define OKL4_OFS_LINKS___PADDING0_4 (4)
+/*lint -esym(621, OKL4_OFS_LINKS___PADDING1_5) */
+#define OKL4_OFS_LINKS___PADDING1_5 (5)
+/*lint -esym(621, OKL4_OFS_LINKS___PADDING2_6) */
+#define OKL4_OFS_LINKS___PADDING2_6 (6)
+/*lint -esym(621, OKL4_OFS_LINKS___PADDING3_7) */
+#define OKL4_OFS_LINKS___PADDING3_7 (7)
+/*lint -esym(621, OKL4_OFS_LINKS_LINKS) */
+#define OKL4_OFS_LINKS_LINKS (8)
+/**
+ *   Offsets for struct okl4_machine_info
+ **/
+/*lint -esym(621, OKL4_STRUCT_MACHINE_INFO_SIZE) */
+#define OKL4_STRUCT_MACHINE_INFO_SIZE (24)
+/*lint -esym(621, OKL4_OFS_MACHINE_INFO_L1_CACHE_LINE_SIZE) */
+#define OKL4_OFS_MACHINE_INFO_L1_CACHE_LINE_SIZE (0)
+/*lint -esym(621, OKL4_OFS_MACHINE_INFO_L2_CACHE_LINE_SIZE) */
+#define OKL4_OFS_MACHINE_INFO_L2_CACHE_LINE_SIZE (8)
+/*lint -esym(621, OKL4_OFS_MACHINE_INFO_NUM_CPUS) */
+#define OKL4_OFS_MACHINE_INFO_NUM_CPUS (16)
+/*lint -esym(621, OKL4_OFS_MACHINE_INFO___PADDING0_4) */
+#define OKL4_OFS_MACHINE_INFO___PADDING0_4 (20)
+/*lint -esym(621, OKL4_OFS_MACHINE_INFO___PADDING1_5) */
+#define OKL4_OFS_MACHINE_INFO___PADDING1_5 (21)
+/*lint -esym(621, OKL4_OFS_MACHINE_INFO___PADDING2_6) */
+#define OKL4_OFS_MACHINE_INFO___PADDING2_6 (22)
+/*lint -esym(621, OKL4_OFS_MACHINE_INFO___PADDING3_7) */
+#define OKL4_OFS_MACHINE_INFO___PADDING3_7 (23)
+/**
+ *   Offsets for struct okl4_merged_physpool
+ **/
+/*lint -esym(621, OKL4_STRUCT_MERGED_PHYSPOOL_SIZE) */
+#define OKL4_STRUCT_MERGED_PHYSPOOL_SIZE (16)
+/*lint -esym(621, OKL4_OFS_MERGED_PHYSPOOL_PHYS_ADDR) */
+#define OKL4_OFS_MERGED_PHYSPOOL_PHYS_ADDR (0)
+/*lint -esym(621, OKL4_OFS_MERGED_PHYSPOOL_NUM_SEGMENTS) */
+#define OKL4_OFS_MERGED_PHYSPOOL_NUM_SEGMENTS (8)
+/*lint -esym(621, OKL4_OFS_MERGED_PHYSPOOL___PADDING0_4) */
+#define OKL4_OFS_MERGED_PHYSPOOL___PADDING0_4 (12)
+/*lint -esym(621, OKL4_OFS_MERGED_PHYSPOOL___PADDING1_5) */
+#define OKL4_OFS_MERGED_PHYSPOOL___PADDING1_5 (13)
+/*lint -esym(621, OKL4_OFS_MERGED_PHYSPOOL___PADDING2_6) */
+#define OKL4_OFS_MERGED_PHYSPOOL___PADDING2_6 (14)
+/*lint -esym(621, OKL4_OFS_MERGED_PHYSPOOL___PADDING3_7) */
+#define OKL4_OFS_MERGED_PHYSPOOL___PADDING3_7 (15)
+/*lint -esym(621, OKL4_OFS_MERGED_PHYSPOOL_SEGMENTS) */
+#define OKL4_OFS_MERGED_PHYSPOOL_SEGMENTS (16)
+/**
+ *   Offsets for struct okl4_microvisor_timer
+ **/
+/*lint -esym(621, OKL4_STRUCT_MICROVISOR_TIMER_SIZE) */
+#define OKL4_STRUCT_MICROVISOR_TIMER_SIZE (8)
+/*lint -esym(621, OKL4_OFS_MICROVISOR_TIMER_KCAP) */
+#define OKL4_OFS_MICROVISOR_TIMER_KCAP (0)
+/*lint -esym(621, OKL4_OFS_MICROVISOR_TIMER_VIRQ) */
+#define OKL4_OFS_MICROVISOR_TIMER_VIRQ (4)
+/**
+ *   Offsets for struct okl4_cpu_registers
+ **/
+/*lint -esym(621, OKL4_STRUCT_CPU_REGISTERS_SIZE) */
+#define OKL4_STRUCT_CPU_REGISTERS_SIZE (448)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X) */
+#define OKL4_OFS_CPU_REGISTERS_X (0)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_0) */
+#define OKL4_OFS_CPU_REGISTERS_X_0 (0)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_1) */
+#define OKL4_OFS_CPU_REGISTERS_X_1 (8)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_2) */
+#define OKL4_OFS_CPU_REGISTERS_X_2 (16)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_3) */
+#define OKL4_OFS_CPU_REGISTERS_X_3 (24)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_4) */
+#define OKL4_OFS_CPU_REGISTERS_X_4 (32)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_5) */
+#define OKL4_OFS_CPU_REGISTERS_X_5 (40)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_6) */
+#define OKL4_OFS_CPU_REGISTERS_X_6 (48)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_7) */
+#define OKL4_OFS_CPU_REGISTERS_X_7 (56)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_8) */
+#define OKL4_OFS_CPU_REGISTERS_X_8 (64)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_9) */
+#define OKL4_OFS_CPU_REGISTERS_X_9 (72)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_10) */
+#define OKL4_OFS_CPU_REGISTERS_X_10 (80)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_11) */
+#define OKL4_OFS_CPU_REGISTERS_X_11 (88)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_12) */
+#define OKL4_OFS_CPU_REGISTERS_X_12 (96)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_13) */
+#define OKL4_OFS_CPU_REGISTERS_X_13 (104)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_14) */
+#define OKL4_OFS_CPU_REGISTERS_X_14 (112)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_15) */
+#define OKL4_OFS_CPU_REGISTERS_X_15 (120)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_16) */
+#define OKL4_OFS_CPU_REGISTERS_X_16 (128)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_17) */
+#define OKL4_OFS_CPU_REGISTERS_X_17 (136)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_18) */
+#define OKL4_OFS_CPU_REGISTERS_X_18 (144)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_19) */
+#define OKL4_OFS_CPU_REGISTERS_X_19 (152)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_20) */
+#define OKL4_OFS_CPU_REGISTERS_X_20 (160)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_21) */
+#define OKL4_OFS_CPU_REGISTERS_X_21 (168)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_22) */
+#define OKL4_OFS_CPU_REGISTERS_X_22 (176)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_23) */
+#define OKL4_OFS_CPU_REGISTERS_X_23 (184)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_24) */
+#define OKL4_OFS_CPU_REGISTERS_X_24 (192)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_25) */
+#define OKL4_OFS_CPU_REGISTERS_X_25 (200)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_26) */
+#define OKL4_OFS_CPU_REGISTERS_X_26 (208)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_27) */
+#define OKL4_OFS_CPU_REGISTERS_X_27 (216)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_28) */
+#define OKL4_OFS_CPU_REGISTERS_X_28 (224)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_29) */
+#define OKL4_OFS_CPU_REGISTERS_X_29 (232)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_30) */
+#define OKL4_OFS_CPU_REGISTERS_X_30 (240)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_SP_EL0) */
+#define OKL4_OFS_CPU_REGISTERS_SP_EL0 (248)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_IP) */
+#define OKL4_OFS_CPU_REGISTERS_IP (256)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_CPSR) */
+#define OKL4_OFS_CPU_REGISTERS_CPSR (264)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING0_4) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING0_4 (268)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING1_5) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING1_5 (269)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING2_6) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING2_6 (270)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING3_7) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING3_7 (271)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_SP_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_SP_EL1 (272)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_ELR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_ELR_EL1 (280)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_SPSR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_SPSR_EL1 (288)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_SPSR_ABT) */
+#define OKL4_OFS_CPU_REGISTERS_SPSR_ABT (292)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_SPSR_UND) */
+#define OKL4_OFS_CPU_REGISTERS_SPSR_UND (296)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_SPSR_IRQ) */
+#define OKL4_OFS_CPU_REGISTERS_SPSR_IRQ (300)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_SPSR_FIQ) */
+#define OKL4_OFS_CPU_REGISTERS_SPSR_FIQ (304)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_CSSELR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_CSSELR_EL1 (308)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_SCTLR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_SCTLR_EL1 (312)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_CPACR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_CPACR_EL1 (316)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_TTBR0_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_TTBR0_EL1 (320)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_TTBR1_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_TTBR1_EL1 (328)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_TCR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_TCR_EL1 (336)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_DACR32_EL2) */
+#define OKL4_OFS_CPU_REGISTERS_DACR32_EL2 (344)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_IFSR32_EL2) */
+#define OKL4_OFS_CPU_REGISTERS_IFSR32_EL2 (348)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_ESR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_ESR_EL1 (352)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING4_4) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING4_4 (356)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING5_5) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING5_5 (357)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING6_6) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING6_6 (358)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING7_7) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING7_7 (359)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_FAR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_FAR_EL1 (360)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_PAR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_PAR_EL1 (368)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_MAIR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_MAIR_EL1 (376)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_VBAR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_VBAR_EL1 (384)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_CONTEXTIDR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_CONTEXTIDR_EL1 (392)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING8_4) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING8_4 (396)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING9_5) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING9_5 (397)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING10_6) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING10_6 (398)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING11_7) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING11_7 (399)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_TPIDR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_TPIDR_EL1 (400)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_TPIDRRO_EL0) */
+#define OKL4_OFS_CPU_REGISTERS_TPIDRRO_EL0 (408)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_TPIDR_EL0) */
+#define OKL4_OFS_CPU_REGISTERS_TPIDR_EL0 (416)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_PMCR_EL0) */
+#define OKL4_OFS_CPU_REGISTERS_PMCR_EL0 (424)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING12_4) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING12_4 (428)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING13_5) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING13_5 (429)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING14_6) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING14_6 (430)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING15_7) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING15_7 (431)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_PMCCNTR_EL0) */
+#define OKL4_OFS_CPU_REGISTERS_PMCCNTR_EL0 (432)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_FPEXC32_EL2) */
+#define OKL4_OFS_CPU_REGISTERS_FPEXC32_EL2 (440)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_CNTKCTL_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_CNTKCTL_EL1 (444)
+/**
+ *   Offsets for struct okl4_schedule_profile_data
+ **/
+/*lint -esym(621, OKL4_STRUCT_SCHEDULE_PROFILE_DATA_SIZE) */
+#define OKL4_STRUCT_SCHEDULE_PROFILE_DATA_SIZE (32)
+/*lint -esym(621, OKL4_OFS_SCHEDULE_PROFILE_DATA_TIMESTAMP) */
+#define OKL4_OFS_SCHEDULE_PROFILE_DATA_TIMESTAMP (0)
+/*lint -esym(621, OKL4_OFS_SCHEDULE_PROFILE_DATA_CPU_TIME) */
+#define OKL4_OFS_SCHEDULE_PROFILE_DATA_CPU_TIME (8)
+/*lint -esym(621, OKL4_OFS_SCHEDULE_PROFILE_DATA_CONTEXT_SWITCHES) */
+#define OKL4_OFS_SCHEDULE_PROFILE_DATA_CONTEXT_SWITCHES (16)
+/*lint -esym(621, OKL4_OFS_SCHEDULE_PROFILE_DATA_CPU_MIGRATIONS) */
+#define OKL4_OFS_SCHEDULE_PROFILE_DATA_CPU_MIGRATIONS (20)
+/*lint -esym(621, OKL4_OFS_SCHEDULE_PROFILE_DATA_CPU_HWIRQS) */
+#define OKL4_OFS_SCHEDULE_PROFILE_DATA_CPU_HWIRQS (24)
+/*lint -esym(621, OKL4_OFS_SCHEDULE_PROFILE_DATA_CPU_VIRQS) */
+#define OKL4_OFS_SCHEDULE_PROFILE_DATA_CPU_VIRQS (28)
+/**
+ *   Offsets for struct okl4_shared_buffer
+ **/
+/*lint -esym(621, OKL4_STRUCT_SHARED_BUFFER_SIZE) */
+#define OKL4_STRUCT_SHARED_BUFFER_SIZE (32)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFER_PHYSICAL_BASE) */
+#define OKL4_OFS_SHARED_BUFFER_PHYSICAL_BASE (0)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFER_VIRTMEM_ITEM) */
+#define OKL4_OFS_SHARED_BUFFER_VIRTMEM_ITEM (8)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFER_VIRTMEM_ITEM_RANGE) */
+#define OKL4_OFS_SHARED_BUFFER_VIRTMEM_ITEM_RANGE (8)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFER_VIRTMEM_ITEM_RANGE_BASE) */
+#define OKL4_OFS_SHARED_BUFFER_VIRTMEM_ITEM_RANGE_BASE (8)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFER_VIRTMEM_ITEM_RANGE_SIZE) */
+#define OKL4_OFS_SHARED_BUFFER_VIRTMEM_ITEM_RANGE_SIZE (16)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFER_CAP) */
+#define OKL4_OFS_SHARED_BUFFER_CAP (24)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFER___PADDING0_4) */
+#define OKL4_OFS_SHARED_BUFFER___PADDING0_4 (28)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFER___PADDING1_5) */
+#define OKL4_OFS_SHARED_BUFFER___PADDING1_5 (29)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFER___PADDING2_6) */
+#define OKL4_OFS_SHARED_BUFFER___PADDING2_6 (30)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFER___PADDING3_7) */
+#define OKL4_OFS_SHARED_BUFFER___PADDING3_7 (31)
+/**
+ *   Offsets for struct okl4_shared_buffers_array
+ **/
+/*lint -esym(621, OKL4_STRUCT_SHARED_BUFFERS_ARRAY_SIZE) */
+#define OKL4_STRUCT_SHARED_BUFFERS_ARRAY_SIZE (16)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFERS_ARRAY_BUFFERS) */
+#define OKL4_OFS_SHARED_BUFFERS_ARRAY_BUFFERS (0)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFERS_ARRAY_NUM_BUFFERS) */
+#define OKL4_OFS_SHARED_BUFFERS_ARRAY_NUM_BUFFERS (8)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFERS_ARRAY___PADDING0_4) */
+#define OKL4_OFS_SHARED_BUFFERS_ARRAY___PADDING0_4 (12)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFERS_ARRAY___PADDING1_5) */
+#define OKL4_OFS_SHARED_BUFFERS_ARRAY___PADDING1_5 (13)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFERS_ARRAY___PADDING2_6) */
+#define OKL4_OFS_SHARED_BUFFERS_ARRAY___PADDING2_6 (14)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFERS_ARRAY___PADDING3_7) */
+#define OKL4_OFS_SHARED_BUFFERS_ARRAY___PADDING3_7 (15)
+/**
+ *   Offsets for struct _okl4_tracebuffer_buffer_header
+ **/
+/*lint -esym(621, _OKL4_STRUCT_TRACEBUFFER_BUFFER_HEADER_SIZE) */
+#define _OKL4_STRUCT_TRACEBUFFER_BUFFER_HEADER_SIZE (40)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER_TIMESTAMP) */
+#define _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER_TIMESTAMP (0)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER_WRAP) */
+#define _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER_WRAP (8)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER___PADDING0_4) */
+#define _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER___PADDING0_4 (12)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER___PADDING1_5) */
+#define _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER___PADDING1_5 (13)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER___PADDING2_6) */
+#define _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER___PADDING2_6 (14)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER___PADDING3_7) */
+#define _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER___PADDING3_7 (15)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER_SIZE) */
+#define _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER_SIZE (16)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER_HEAD) */
+#define _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER_HEAD (24)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER_OFFSET) */
+#define _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER_OFFSET (32)
+/**
+ *   Offsets for struct okl4_tracebuffer_env
+ **/
+/*lint -esym(621, OKL4_STRUCT_TRACEBUFFER_ENV_SIZE) */
+#define OKL4_STRUCT_TRACEBUFFER_ENV_SIZE (24)
+/*lint -esym(621, OKL4_OFS_TRACEBUFFER_ENV_VIRT) */
+#define OKL4_OFS_TRACEBUFFER_ENV_VIRT (0)
+/*lint -esym(621, OKL4_OFS_TRACEBUFFER_ENV_VIRT_RANGE) */
+#define OKL4_OFS_TRACEBUFFER_ENV_VIRT_RANGE (0)
+/*lint -esym(621, OKL4_OFS_TRACEBUFFER_ENV_VIRT_RANGE_BASE) */
+#define OKL4_OFS_TRACEBUFFER_ENV_VIRT_RANGE_BASE (0)
+/*lint -esym(621, OKL4_OFS_TRACEBUFFER_ENV_VIRT_RANGE_SIZE) */
+#define OKL4_OFS_TRACEBUFFER_ENV_VIRT_RANGE_SIZE (8)
+/*lint -esym(621, OKL4_OFS_TRACEBUFFER_ENV_VIRQ) */
+#define OKL4_OFS_TRACEBUFFER_ENV_VIRQ (16)
+/*lint -esym(621, OKL4_OFS_TRACEBUFFER_ENV___PADDING0_4) */
+#define OKL4_OFS_TRACEBUFFER_ENV___PADDING0_4 (20)
+/*lint -esym(621, OKL4_OFS_TRACEBUFFER_ENV___PADDING1_5) */
+#define OKL4_OFS_TRACEBUFFER_ENV___PADDING1_5 (21)
+/*lint -esym(621, OKL4_OFS_TRACEBUFFER_ENV___PADDING2_6) */
+#define OKL4_OFS_TRACEBUFFER_ENV___PADDING2_6 (22)
+/*lint -esym(621, OKL4_OFS_TRACEBUFFER_ENV___PADDING3_7) */
+#define OKL4_OFS_TRACEBUFFER_ENV___PADDING3_7 (23)
+/**
+ *   Offsets for struct _okl4_tracebuffer_header
+ **/
+/*lint -esym(621, _OKL4_STRUCT_TRACEBUFFER_HEADER_SIZE) */
+#define _OKL4_STRUCT_TRACEBUFFER_HEADER_SIZE (40)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_HEADER_MAGIC) */
+#define _OKL4_OFS_TRACEBUFFER_HEADER_MAGIC (0)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_HEADER_VERSION) */
+#define _OKL4_OFS_TRACEBUFFER_HEADER_VERSION (4)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_HEADER_ID) */
+#define _OKL4_OFS_TRACEBUFFER_HEADER_ID (8)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_HEADER_NUM_BUFFERS) */
+#define _OKL4_OFS_TRACEBUFFER_HEADER_NUM_BUFFERS (12)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_HEADER_BUFFER_SIZE) */
+#define _OKL4_OFS_TRACEBUFFER_HEADER_BUFFER_SIZE (16)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_HEADER_LOG_MASK) */
+#define _OKL4_OFS_TRACEBUFFER_HEADER_LOG_MASK (24)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_HEADER_ACTIVE_BUFFER) */
+#define _OKL4_OFS_TRACEBUFFER_HEADER_ACTIVE_BUFFER (28)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_HEADER_GRABBED_BUFFER) */
+#define _OKL4_OFS_TRACEBUFFER_HEADER_GRABBED_BUFFER (32)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_HEADER_EMPTY_BUFFERS) */
+#define _OKL4_OFS_TRACEBUFFER_HEADER_EMPTY_BUFFERS (36)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_HEADER_BUFFERS) */
+#define _OKL4_OFS_TRACEBUFFER_HEADER_BUFFERS (40)
+/**
+ *   Offsets for struct okl4_tracepoint_entry_base
+ **/
+/*lint -esym(621, OKL4_STRUCT_TRACEPOINT_ENTRY_BASE_SIZE) */
+#define OKL4_STRUCT_TRACEPOINT_ENTRY_BASE_SIZE (12)
+/*lint -esym(621, OKL4_OFS_TRACEPOINT_ENTRY_BASE_TIME_OFFSET) */
+#define OKL4_OFS_TRACEPOINT_ENTRY_BASE_TIME_OFFSET (0)
+/*lint -esym(621, OKL4_OFS_TRACEPOINT_ENTRY_BASE_MASKS) */
+#define OKL4_OFS_TRACEPOINT_ENTRY_BASE_MASKS (4)
+/*lint -esym(621, OKL4_OFS_TRACEPOINT_ENTRY_BASE_DESCRIPTION) */
+#define OKL4_OFS_TRACEPOINT_ENTRY_BASE_DESCRIPTION (8)
+/**
+ *   Offsets for struct okl4_tracepoint_unpacked_entry
+ **/
+/*lint -esym(621, OKL4_STRUCT_TRACEPOINT_UNPACKED_ENTRY_SIZE) */
+#define OKL4_STRUCT_TRACEPOINT_UNPACKED_ENTRY_SIZE (12)
+/*lint -esym(621, OKL4_OFS_TRACEPOINT_UNPACKED_ENTRY_ENTRY) */
+#define OKL4_OFS_TRACEPOINT_UNPACKED_ENTRY_ENTRY (0)
+/*lint -esym(621, OKL4_OFS_TRACEPOINT_UNPACKED_ENTRY_ENTRY_TIME_OFFSET) */
+#define OKL4_OFS_TRACEPOINT_UNPACKED_ENTRY_ENTRY_TIME_OFFSET (0)
+/*lint -esym(621, OKL4_OFS_TRACEPOINT_UNPACKED_ENTRY_ENTRY_MASKS) */
+#define OKL4_OFS_TRACEPOINT_UNPACKED_ENTRY_ENTRY_MASKS (4)
+/*lint -esym(621, OKL4_OFS_TRACEPOINT_UNPACKED_ENTRY_ENTRY_DESCRIPTION) */
+#define OKL4_OFS_TRACEPOINT_UNPACKED_ENTRY_ENTRY_DESCRIPTION (8)
+/*lint -esym(621, OKL4_OFS_TRACEPOINT_UNPACKED_ENTRY_DATA) */
+#define OKL4_OFS_TRACEPOINT_UNPACKED_ENTRY_DATA (12)
+/**
+ *   Offsets for struct okl4_vclient_info
+ **/
+/*lint -esym(621, OKL4_STRUCT_VCLIENT_INFO_SIZE) */
+#define OKL4_STRUCT_VCLIENT_INFO_SIZE (32)
+/*lint -esym(621, OKL4_OFS_VCLIENT_INFO_AXON_EP) */
+#define OKL4_OFS_VCLIENT_INFO_AXON_EP (0)
+/*lint -esym(621, OKL4_OFS_VCLIENT_INFO_AXON_EP_RX) */
+#define OKL4_OFS_VCLIENT_INFO_AXON_EP_RX (0)
+/*lint -esym(621, OKL4_OFS_VCLIENT_INFO_AXON_EP_RX_KCAP) */
+#define OKL4_OFS_VCLIENT_INFO_AXON_EP_RX_KCAP (0)
+/*lint -esym(621, OKL4_OFS_VCLIENT_INFO_AXON_EP_RX_SEGMENT) */
+#define OKL4_OFS_VCLIENT_INFO_AXON_EP_RX_SEGMENT (4)
+/*lint -esym(621, OKL4_OFS_VCLIENT_INFO_AXON_EP_RX_VIRQ) */
+#define OKL4_OFS_VCLIENT_INFO_AXON_EP_RX_VIRQ (8)
+/*lint -esym(621, OKL4_OFS_VCLIENT_INFO_AXON_EP_TX) */
+#define OKL4_OFS_VCLIENT_INFO_AXON_EP_TX (12)
+/*lint -esym(621, OKL4_OFS_VCLIENT_INFO_AXON_EP_TX_KCAP) */
+#define OKL4_OFS_VCLIENT_INFO_AXON_EP_TX_KCAP (12)
+/*lint -esym(621, OKL4_OFS_VCLIENT_INFO_AXON_EP_TX_SEGMENT) */
+#define OKL4_OFS_VCLIENT_INFO_AXON_EP_TX_SEGMENT (16)
+/*lint -esym(621, OKL4_OFS_VCLIENT_INFO_AXON_EP_TX_VIRQ) */
+#define OKL4_OFS_VCLIENT_INFO_AXON_EP_TX_VIRQ (20)
+/*lint -esym(621, OKL4_OFS_VCLIENT_INFO_OPAQUE) */
+#define OKL4_OFS_VCLIENT_INFO_OPAQUE (24)
+/**
+ *   Offsets for struct okl4_vcpu_entry
+ **/
+/*lint -esym(621, OKL4_STRUCT_VCPU_ENTRY_SIZE) */
+#define OKL4_STRUCT_VCPU_ENTRY_SIZE (24)
+/*lint -esym(621, OKL4_OFS_VCPU_ENTRY_VCPU) */
+#define OKL4_OFS_VCPU_ENTRY_VCPU (0)
+/*lint -esym(621, OKL4_OFS_VCPU_ENTRY_IPI) */
+#define OKL4_OFS_VCPU_ENTRY_IPI (4)
+/*lint -esym(621, OKL4_OFS_VCPU_ENTRY_IRQ) */
+#define OKL4_OFS_VCPU_ENTRY_IRQ (8)
+/*lint -esym(621, OKL4_OFS_VCPU_ENTRY___PADDING0_4) */
+#define OKL4_OFS_VCPU_ENTRY___PADDING0_4 (12)
+/*lint -esym(621, OKL4_OFS_VCPU_ENTRY___PADDING1_5) */
+#define OKL4_OFS_VCPU_ENTRY___PADDING1_5 (13)
+/*lint -esym(621, OKL4_OFS_VCPU_ENTRY___PADDING2_6) */
+#define OKL4_OFS_VCPU_ENTRY___PADDING2_6 (14)
+/*lint -esym(621, OKL4_OFS_VCPU_ENTRY___PADDING3_7) */
+#define OKL4_OFS_VCPU_ENTRY___PADDING3_7 (15)
+/*lint -esym(621, OKL4_OFS_VCPU_ENTRY_STACK_POINTER) */
+#define OKL4_OFS_VCPU_ENTRY_STACK_POINTER (16)
+/**
+ *   Offsets for struct okl4_vcpu_table
+ **/
+/*lint -esym(621, OKL4_STRUCT_VCPU_TABLE_SIZE) */
+#define OKL4_STRUCT_VCPU_TABLE_SIZE (16)
+/*lint -esym(621, OKL4_OFS_VCPU_TABLE_NUM_VCPUS) */
+#define OKL4_OFS_VCPU_TABLE_NUM_VCPUS (0)
+/*lint -esym(621, OKL4_OFS_VCPU_TABLE___PADDING0_4) */
+#define OKL4_OFS_VCPU_TABLE___PADDING0_4 (4)
+/*lint -esym(621, OKL4_OFS_VCPU_TABLE___PADDING1_5) */
+#define OKL4_OFS_VCPU_TABLE___PADDING1_5 (5)
+/*lint -esym(621, OKL4_OFS_VCPU_TABLE___PADDING2_6) */
+#define OKL4_OFS_VCPU_TABLE___PADDING2_6 (6)
+/*lint -esym(621, OKL4_OFS_VCPU_TABLE___PADDING3_7) */
+#define OKL4_OFS_VCPU_TABLE___PADDING3_7 (7)
+/*lint -esym(621, OKL4_OFS_VCPU_TABLE_VCPU) */
+#define OKL4_OFS_VCPU_TABLE_VCPU (8)
+/**
+ *   Offsets for struct okl4_vfp_ctrl_registers
+ **/
+/*lint -esym(621, OKL4_STRUCT_VFP_CTRL_REGISTERS_SIZE) */
+#define OKL4_STRUCT_VFP_CTRL_REGISTERS_SIZE (8)
+/*lint -esym(621, OKL4_OFS_VFP_CTRL_REGISTERS_FPSR) */
+#define OKL4_OFS_VFP_CTRL_REGISTERS_FPSR (0)
+/*lint -esym(621, OKL4_OFS_VFP_CTRL_REGISTERS_FPCR) */
+#define OKL4_OFS_VFP_CTRL_REGISTERS_FPCR (4)
+/**
+ *   Offsets for struct okl4_vfp_register
+ **/
+/*lint -esym(621, OKL4_STRUCT_VFP_REGISTER_SIZE) */
+#define OKL4_STRUCT_VFP_REGISTER_SIZE (16)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES) */
+#define OKL4_OFS_VFP_REGISTER___BYTES (0)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_0) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_0 (0)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_1) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_1 (1)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_2) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_2 (2)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_3) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_3 (3)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_4) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_4 (4)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_5) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_5 (5)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_6) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_6 (6)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_7) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_7 (7)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_8) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_8 (8)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_9) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_9 (9)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_10) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_10 (10)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_11) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_11 (11)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_12) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_12 (12)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_13) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_13 (13)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_14) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_14 (14)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_15) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_15 (15)
+/**
+ *   Offsets for struct okl4_vfp_registers
+ **/
+/*lint -esym(621, OKL4_STRUCT_VFP_REGISTERS_SIZE) */
+#define OKL4_STRUCT_VFP_REGISTERS_SIZE (528)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V0) */
+#define OKL4_OFS_VFP_REGISTERS_V0 (0)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V1) */
+#define OKL4_OFS_VFP_REGISTERS_V1 (16)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V2) */
+#define OKL4_OFS_VFP_REGISTERS_V2 (32)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V3) */
+#define OKL4_OFS_VFP_REGISTERS_V3 (48)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V4) */
+#define OKL4_OFS_VFP_REGISTERS_V4 (64)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V5) */
+#define OKL4_OFS_VFP_REGISTERS_V5 (80)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V6) */
+#define OKL4_OFS_VFP_REGISTERS_V6 (96)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V7) */
+#define OKL4_OFS_VFP_REGISTERS_V7 (112)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V8) */
+#define OKL4_OFS_VFP_REGISTERS_V8 (128)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V9) */
+#define OKL4_OFS_VFP_REGISTERS_V9 (144)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V10) */
+#define OKL4_OFS_VFP_REGISTERS_V10 (160)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V11) */
+#define OKL4_OFS_VFP_REGISTERS_V11 (176)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V12) */
+#define OKL4_OFS_VFP_REGISTERS_V12 (192)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V13) */
+#define OKL4_OFS_VFP_REGISTERS_V13 (208)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V14) */
+#define OKL4_OFS_VFP_REGISTERS_V14 (224)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V15) */
+#define OKL4_OFS_VFP_REGISTERS_V15 (240)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V16) */
+#define OKL4_OFS_VFP_REGISTERS_V16 (256)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V17) */
+#define OKL4_OFS_VFP_REGISTERS_V17 (272)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V18) */
+#define OKL4_OFS_VFP_REGISTERS_V18 (288)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V19) */
+#define OKL4_OFS_VFP_REGISTERS_V19 (304)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V20) */
+#define OKL4_OFS_VFP_REGISTERS_V20 (320)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V21) */
+#define OKL4_OFS_VFP_REGISTERS_V21 (336)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V22) */
+#define OKL4_OFS_VFP_REGISTERS_V22 (352)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V23) */
+#define OKL4_OFS_VFP_REGISTERS_V23 (368)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V24) */
+#define OKL4_OFS_VFP_REGISTERS_V24 (384)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V25) */
+#define OKL4_OFS_VFP_REGISTERS_V25 (400)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V26) */
+#define OKL4_OFS_VFP_REGISTERS_V26 (416)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V27) */
+#define OKL4_OFS_VFP_REGISTERS_V27 (432)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V28) */
+#define OKL4_OFS_VFP_REGISTERS_V28 (448)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V29) */
+#define OKL4_OFS_VFP_REGISTERS_V29 (464)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V30) */
+#define OKL4_OFS_VFP_REGISTERS_V30 (480)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V31) */
+#define OKL4_OFS_VFP_REGISTERS_V31 (496)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_CONTROL) */
+#define OKL4_OFS_VFP_REGISTERS_CONTROL (512)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_CONTROL_FPSR) */
+#define OKL4_OFS_VFP_REGISTERS_CONTROL_FPSR (512)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_CONTROL_FPCR) */
+#define OKL4_OFS_VFP_REGISTERS_CONTROL_FPCR (516)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS___PADDING0_8) */
+#define OKL4_OFS_VFP_REGISTERS___PADDING0_8 (520)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS___PADDING1_9) */
+#define OKL4_OFS_VFP_REGISTERS___PADDING1_9 (521)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS___PADDING2_10) */
+#define OKL4_OFS_VFP_REGISTERS___PADDING2_10 (522)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS___PADDING3_11) */
+#define OKL4_OFS_VFP_REGISTERS___PADDING3_11 (523)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS___PADDING4_12) */
+#define OKL4_OFS_VFP_REGISTERS___PADDING4_12 (524)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS___PADDING5_13) */
+#define OKL4_OFS_VFP_REGISTERS___PADDING5_13 (525)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS___PADDING6_14) */
+#define OKL4_OFS_VFP_REGISTERS___PADDING6_14 (526)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS___PADDING7_15) */
+#define OKL4_OFS_VFP_REGISTERS___PADDING7_15 (527)
+/**
+ *   Offsets for struct okl4_virtmem_pool
+ **/
+/*lint -esym(621, OKL4_STRUCT_VIRTMEM_POOL_SIZE) */
+#define OKL4_STRUCT_VIRTMEM_POOL_SIZE (16)
+/*lint -esym(621, OKL4_OFS_VIRTMEM_POOL_POOL) */
+#define OKL4_OFS_VIRTMEM_POOL_POOL (0)
+/*lint -esym(621, OKL4_OFS_VIRTMEM_POOL_POOL_RANGE) */
+#define OKL4_OFS_VIRTMEM_POOL_POOL_RANGE (0)
+/*lint -esym(621, OKL4_OFS_VIRTMEM_POOL_POOL_RANGE_BASE) */
+#define OKL4_OFS_VIRTMEM_POOL_POOL_RANGE_BASE (0)
+/*lint -esym(621, OKL4_OFS_VIRTMEM_POOL_POOL_RANGE_SIZE) */
+#define OKL4_OFS_VIRTMEM_POOL_POOL_RANGE_SIZE (8)
+/**
+ *   Offsets for struct okl4_virtual_interrupt_lines
+ **/
+/*lint -esym(621, OKL4_STRUCT_VIRTUAL_INTERRUPT_LINES_SIZE) */
+#define OKL4_STRUCT_VIRTUAL_INTERRUPT_LINES_SIZE (16)
+/*lint -esym(621, OKL4_OFS_VIRTUAL_INTERRUPT_LINES_NUM_LINES) */
+#define OKL4_OFS_VIRTUAL_INTERRUPT_LINES_NUM_LINES (0)
+/*lint -esym(621, OKL4_OFS_VIRTUAL_INTERRUPT_LINES___PADDING0_4) */
+#define OKL4_OFS_VIRTUAL_INTERRUPT_LINES___PADDING0_4 (4)
+/*lint -esym(621, OKL4_OFS_VIRTUAL_INTERRUPT_LINES___PADDING1_5) */
+#define OKL4_OFS_VIRTUAL_INTERRUPT_LINES___PADDING1_5 (5)
+/*lint -esym(621, OKL4_OFS_VIRTUAL_INTERRUPT_LINES___PADDING2_6) */
+#define OKL4_OFS_VIRTUAL_INTERRUPT_LINES___PADDING2_6 (6)
+/*lint -esym(621, OKL4_OFS_VIRTUAL_INTERRUPT_LINES___PADDING3_7) */
+#define OKL4_OFS_VIRTUAL_INTERRUPT_LINES___PADDING3_7 (7)
+/*lint -esym(621, OKL4_OFS_VIRTUAL_INTERRUPT_LINES_LINES) */
+#define OKL4_OFS_VIRTUAL_INTERRUPT_LINES_LINES (8)
+/**
+ *   Offsets for struct okl4_vserver_info
+ **/
+/*lint -esym(621, OKL4_STRUCT_VSERVER_INFO_SIZE) */
+#define OKL4_STRUCT_VSERVER_INFO_SIZE (32)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO_CHANNELS) */
+#define OKL4_OFS_VSERVER_INFO_CHANNELS (0)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO_CHANNELS_DATA) */
+#define OKL4_OFS_VSERVER_INFO_CHANNELS_DATA (0)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO_CHANNELS_MAX_MESSAGES) */
+#define OKL4_OFS_VSERVER_INFO_CHANNELS_MAX_MESSAGES (8)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO_CHANNELS___PADDING0_4) */
+#define OKL4_OFS_VSERVER_INFO_CHANNELS___PADDING0_4 (12)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO_CHANNELS___PADDING1_5) */
+#define OKL4_OFS_VSERVER_INFO_CHANNELS___PADDING1_5 (13)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO_CHANNELS___PADDING2_6) */
+#define OKL4_OFS_VSERVER_INFO_CHANNELS___PADDING2_6 (14)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO_CHANNELS___PADDING3_7) */
+#define OKL4_OFS_VSERVER_INFO_CHANNELS___PADDING3_7 (15)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO_CHANNELS_MESSAGE_SIZE) */
+#define OKL4_OFS_VSERVER_INFO_CHANNELS_MESSAGE_SIZE (16)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO_NUM_CLIENTS) */
+#define OKL4_OFS_VSERVER_INFO_NUM_CLIENTS (24)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO___PADDING0_4) */
+#define OKL4_OFS_VSERVER_INFO___PADDING0_4 (28)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO___PADDING1_5) */
+#define OKL4_OFS_VSERVER_INFO___PADDING1_5 (29)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO___PADDING2_6) */
+#define OKL4_OFS_VSERVER_INFO___PADDING2_6 (30)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO___PADDING3_7) */
+#define OKL4_OFS_VSERVER_INFO___PADDING3_7 (31)
+/**
+ *   Offsets for struct okl4_vservices_service_descriptor
+ **/
+/*lint -esym(621, OKL4_STRUCT_VSERVICES_SERVICE_DESCRIPTOR_SIZE) */
+#define OKL4_STRUCT_VSERVICES_SERVICE_DESCRIPTOR_SIZE (24)
+/*lint -esym(621, OKL4_OFS_VSERVICES_SERVICE_DESCRIPTOR_NAME) */
+#define OKL4_OFS_VSERVICES_SERVICE_DESCRIPTOR_NAME (0)
+/*lint -esym(621, OKL4_OFS_VSERVICES_SERVICE_DESCRIPTOR_PROTOCOL) */
+#define OKL4_OFS_VSERVICES_SERVICE_DESCRIPTOR_PROTOCOL (8)
+/*lint -esym(621, OKL4_OFS_VSERVICES_SERVICE_DESCRIPTOR_RESERVED) */
+#define OKL4_OFS_VSERVICES_SERVICE_DESCRIPTOR_RESERVED (16)
+/**
+ *   Offsets for struct okl4_vservices_transport_microvisor
+ **/
+/*lint -esym(621, OKL4_STRUCT_VSERVICES_TRANSPORT_MICROVISOR_SIZE) */
+#define OKL4_STRUCT_VSERVICES_TRANSPORT_MICROVISOR_SIZE (120)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_IS_SERVER) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_IS_SERVER (0)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING0_1) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING0_1 (1)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING1_2) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING1_2 (2)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING2_3) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING2_3 (3)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_TYPE) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_TYPE (4)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_U) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_U (8)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN (72)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN_NUM_LINES) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN_NUM_LINES (72)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN___PADDING0_4) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN___PADDING0_4 (76)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN___PADDING1_5) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN___PADDING1_5 (77)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN___PADDING2_6) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN___PADDING2_6 (78)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN___PADDING3_7) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN___PADDING3_7 (79)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN_LINES) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN_LINES (80)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT (88)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT_NUM_LINES) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT_NUM_LINES (88)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT___PADDING0_4) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT___PADDING0_4 (92)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT___PADDING1_5) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT___PADDING1_5 (93)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT___PADDING2_6) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT___PADDING2_6 (94)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT___PADDING3_7) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT___PADDING3_7 (95)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT_LINES) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT_LINES (96)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_NUM_SERVICES) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_NUM_SERVICES (104)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING3_4) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING3_4 (108)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING4_5) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING4_5 (109)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING5_6) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING5_6 (110)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING6_7) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING6_7 (111)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_SERVICES) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_SERVICES (112)
+/**
+ *   Offsets for struct okl4_vservices_transports
+ **/
+/*lint -esym(621, OKL4_STRUCT_VSERVICES_TRANSPORTS_SIZE) */
+#define OKL4_STRUCT_VSERVICES_TRANSPORTS_SIZE (16)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORTS_NUM_TRANSPORTS) */
+#define OKL4_OFS_VSERVICES_TRANSPORTS_NUM_TRANSPORTS (0)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORTS___PADDING0_4) */
+#define OKL4_OFS_VSERVICES_TRANSPORTS___PADDING0_4 (4)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORTS___PADDING1_5) */
+#define OKL4_OFS_VSERVICES_TRANSPORTS___PADDING1_5 (5)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORTS___PADDING2_6) */
+#define OKL4_OFS_VSERVICES_TRANSPORTS___PADDING2_6 (6)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORTS___PADDING3_7) */
+#define OKL4_OFS_VSERVICES_TRANSPORTS___PADDING3_7 (7)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORTS_TRANSPORTS) */
+#define OKL4_OFS_VSERVICES_TRANSPORTS_TRANSPORTS (8)
+
+#endif /* ASSEMBLY */
+
+#endif /* __AUTO__MICROVISOR_OFFSETS_H__ */
+
diff --git a/include/microvisor/kernel/syscalls.h b/include/microvisor/kernel/syscalls.h
new file mode 100644
index 0000000..fdc2c0d
--- /dev/null
+++ b/include/microvisor/kernel/syscalls.h
@@ -0,0 +1,6114 @@
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* Auto generated - do not modify */
+
+
+
+/** @addtogroup lib_microvisor
+ * @{
+ */
+
+#ifndef __AUTO__USER_SYSCALLS_H__
+#define __AUTO__USER_SYSCALLS_H__
+
+/**
+ * @cond no_doc
+ */
+#if defined(ASSEMBLY)
+#define __hvc_str(x) x
+#else
+#define _hvc_str(x) #x
+#define __hvc_str(x) _hvc_str(x)
+#endif
+#if (defined(__GNUC__) && !defined(__clang__)) && \
+    (__GNUC__ < 4 || ((__GNUC__ == 4) && (__GNUC_MINOR__ < 5)))
+#if defined(__thumb2__)
+#define hvc(i) __hvc_str(.hword 0xf7e0 | (i & 0xf); .hword 0x8000 | (i >> 4) @ HVC)
+#else
+#define hvc(i) __hvc_str(.word 0xe1400070 | (i & 0xf) | (i >> 4 << 8) @ HVC)
+#endif
+#else
+#if defined(__ARM_EABI__)
+#if defined(ASSEMBLY) && !defined(__clang__)
+    .arch_extension virt
+#elif !defined(__clang__)
+__asm__(
+    ".arch_extension virt\n"
+);
+#endif
+#endif
+#define hvc(i) __hvc_str(hvc i)
+#endif
+/**
+ * @endcond
+ */
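+
+/*
+ * Illustration (informational only): on a current GCC or Clang toolchain the
+ * hvc() macro above reduces to __hvc_str(hvc i), so a template such as
+ * ""hvc(5184)"\n\t" in the wrappers below becomes the literal instruction
+ * string "hvc 5184\n\t" inside the inline assembly block. The GCC < 4.5
+ * branch instead emits the raw HVC encoding via .word/.hword because those
+ * older assemblers do not accept the hvc mnemonic.
+ */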
+
+#if !defined(ASSEMBLY)
+
+#define OKL4_OK OKL4_ERROR_OK
+
+/** @} */
+
+/*
+ * Syscall prototypes.
+ */
+
+/**
+ *
+ * OKL4 Microvisor system call: AXON_PROCESS_RECV
+ *
+ * @param axon_id
+ * @param transfer_limit
+ *
+ * @retval error
+ * @retval send_empty
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_axon_process_recv_return
+_okl4_sys_axon_process_recv(okl4_kcap_t axon_id, okl4_lsize_t transfer_limit)
+{
+    struct _okl4_sys_axon_process_recv_return result;
+
+    register uint32_t r0 asm("r0") = (uint32_t)axon_id;
+    register uint32_t r1 asm("r1") = (uint32_t)(transfer_limit        & 0xffffffff);
+    register uint32_t r2 asm("r2") = (uint32_t)((transfer_limit >> 32) & 0xffffffff);
+    __asm__ __volatile__(
+            ""hvc(5184)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    result.error = (okl4_error_t)(r0);
+    result.send_empty = (okl4_bool_t)(r1);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_axon_process_recv_return
+_okl4_sys_axon_process_recv(okl4_kcap_t axon_id, okl4_lsize_t transfer_limit)
+{
+    struct _okl4_sys_axon_process_recv_return result;
+
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)axon_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)transfer_limit;
+    __asm__ __volatile__(
+            "" hvc(5184) "\n\t"
+            : "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.error = (okl4_error_t)(x0);
+    result.send_empty = (okl4_bool_t)(x1);
+    return result;
+}
+
+#endif
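+
+/*
+ * Minimal usage sketch, assuming the caller already holds an axon capability
+ * (the "axon_cap" name and the transfer limit of 16 are illustrative only,
+ * not defined by this header):
+ *
+ *     struct _okl4_sys_axon_process_recv_return ret;
+ *
+ *     ret = _okl4_sys_axon_process_recv(axon_cap, 16);
+ *     if (ret.error != OKL4_OK) {
+ *         // propagate or log the hypervisor error code
+ *     }
+ *     // ret.send_empty carries the second return value listed above
+ */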
+
+/**
+ *
+ * OKL4 Microvisor system call: AXON_SET_HALTED
+ *
+ * @param axon_id
+ * @param halted
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_halted(okl4_kcap_t axon_id, okl4_bool_t halted)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)axon_id;
+    register uint32_t r1 asm("r1") = (uint32_t)halted;
+    __asm__ __volatile__(
+            ""hvc(5186)"\n\t"
+            : "+r"(r0), "+r"(r1)
+            :
+            : "cc", "memory", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_halted(okl4_kcap_t axon_id, okl4_bool_t halted)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)axon_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)halted;
+    __asm__ __volatile__(
+            "" hvc(5186) "\n\t"
+            : "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: AXON_SET_RECV_AREA
+ *
+ * @param axon_id
+ * @param base
+ * @param size
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_recv_area(okl4_kcap_t axon_id, okl4_laddr_t base,
+        okl4_lsize_t size)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)axon_id;
+    register uint32_t r1 asm("r1") = (uint32_t)(base        & 0xffffffff);
+    register uint32_t r2 asm("r2") = (uint32_t)((base >> 32) & 0xffffffff);
+    register uint32_t r3 asm("r3") = (uint32_t)(size        & 0xffffffff);
+    register uint32_t r4 asm("r4") = (uint32_t)((size >> 32) & 0xffffffff);
+    __asm__ __volatile__(
+            ""hvc(5187)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4)
+            :
+            : "cc", "memory", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_recv_area(okl4_kcap_t axon_id, okl4_laddr_t base,
+        okl4_lsize_t size)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)axon_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)base;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)size;
+    __asm__ __volatile__(
+            "" hvc(5187) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: AXON_SET_RECV_QUEUE
+ *
+ * @param axon_id
+ * @param queue
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_recv_queue(okl4_kcap_t axon_id, okl4_laddr_t queue)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)axon_id;
+    register uint32_t r1 asm("r1") = (uint32_t)(queue        & 0xffffffff);
+    register uint32_t r2 asm("r2") = (uint32_t)((queue >> 32) & 0xffffffff);
+    __asm__ __volatile__(
+            ""hvc(5188)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_recv_queue(okl4_kcap_t axon_id, okl4_laddr_t queue)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)axon_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)queue;
+    __asm__ __volatile__(
+            "" hvc(5188) "\n\t"
+            : "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: AXON_SET_RECV_SEGMENT
+ *
+ * @param axon_id
+ * @param segment_id
+ * @param segment_base
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_recv_segment(okl4_kcap_t axon_id, okl4_kcap_t segment_id,
+        okl4_laddr_t segment_base)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)axon_id;
+    register uint32_t r1 asm("r1") = (uint32_t)segment_id;
+    register uint32_t r2 asm("r2") = (uint32_t)(segment_base        & 0xffffffff);
+    register uint32_t r3 asm("r3") = (uint32_t)((segment_base >> 32) & 0xffffffff);
+    __asm__ __volatile__(
+            ""hvc(5189)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3)
+            :
+            : "cc", "memory", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_recv_segment(okl4_kcap_t axon_id, okl4_kcap_t segment_id,
+        okl4_laddr_t segment_base)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)axon_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)segment_id;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)segment_base;
+    __asm__ __volatile__(
+            "" hvc(5189) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: AXON_SET_SEND_AREA
+ *
+ * @param axon_id
+ * @param base
+ * @param size
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_send_area(okl4_kcap_t axon_id, okl4_laddr_t base,
+        okl4_lsize_t size)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)axon_id;
+    register uint32_t r1 asm("r1") = (uint32_t)(base        & 0xffffffff);
+    register uint32_t r2 asm("r2") = (uint32_t)((base >> 32) & 0xffffffff);
+    register uint32_t r3 asm("r3") = (uint32_t)(size        & 0xffffffff);
+    register uint32_t r4 asm("r4") = (uint32_t)((size >> 32) & 0xffffffff);
+    __asm__ __volatile__(
+            ""hvc(5190)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4)
+            :
+            : "cc", "memory", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_send_area(okl4_kcap_t axon_id, okl4_laddr_t base,
+        okl4_lsize_t size)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)axon_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)base;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)size;
+    __asm__ __volatile__(
+            "" hvc(5190) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: AXON_SET_SEND_QUEUE
+ *
+ * @param axon_id
+ * @param queue
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_send_queue(okl4_kcap_t axon_id, okl4_laddr_t queue)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)axon_id;
+    register uint32_t r1 asm("r1") = (uint32_t)(queue        & 0xffffffff);
+    register uint32_t r2 asm("r2") = (uint32_t)((queue >> 32) & 0xffffffff);
+    __asm__ __volatile__(
+            ""hvc(5191)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_send_queue(okl4_kcap_t axon_id, okl4_laddr_t queue)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)axon_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)queue;
+    __asm__ __volatile__(
+            "" hvc(5191) "\n\t"
+            : "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: AXON_SET_SEND_SEGMENT
+ *
+ * @param axon_id
+ * @param segment_id
+ * @param segment_base
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_send_segment(okl4_kcap_t axon_id, okl4_kcap_t segment_id,
+        okl4_laddr_t segment_base)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)axon_id;
+    register uint32_t r1 asm("r1") = (uint32_t)segment_id;
+    register uint32_t r2 asm("r2") = (uint32_t)(segment_base        & 0xffffffff);
+    register uint32_t r3 asm("r3") = (uint32_t)((segment_base >> 32) & 0xffffffff);
+    __asm__ __volatile__(
+            ""hvc(5192)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3)
+            :
+            : "cc", "memory", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_send_segment(okl4_kcap_t axon_id, okl4_kcap_t segment_id,
+        okl4_laddr_t segment_base)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)axon_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)segment_id;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)segment_base;
+    __asm__ __volatile__(
+            "" hvc(5192) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: AXON_TRIGGER_SEND
+ *
+ * @param axon_id
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_trigger_send(okl4_kcap_t axon_id)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)axon_id;
+    __asm__ __volatile__(
+            ""hvc(5185)"\n\t"
+            : "+r"(r0)
+            :
+            : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_trigger_send(okl4_kcap_t axon_id)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)axon_id;
+    __asm__ __volatile__(
+            "" hvc(5185) "\n\t"
+            : "+r"(x0)
+            :
+            : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Acknowledge the delivery of an interrupt.
+ *
+ *    @details
+ *    This API returns the number and source of the highest-priority
+ *        enabled,
+ *    pending and inactive interrupt that is targeted at the calling vCPU
+ *    and has higher priority than the calling vCPU's running group
+ *        priority.
+ *
+ *    The returned interrupt is marked as active, and will not be returned
+ *        again
+ *    by this function until @ref okl4_sys_interrupt_eoi is invoked
+ *        specifying the
+ *    same interrupt number and source. The vCPU's running interrupt
+ *        priority is
+ *    raised to the priority of the returned interrupt. This will typically
+ *        result
+ *    in the de-assertion of the vCPU's virtual IRQ line.
+ *
+ *    If no such interrupt exists, interrupt number 1023 is returned. If
+ *        the
+ *    returned interrupt number is 16 or greater, the source ID is 0;
+ *        otherwise it
+ *    is the vCPU ID of the vCPU that raised the interrupt (which is always
+ *        in the
+ *    same Cell as the caller).
+ *
+ *    @note Invoking this API is equivalent to reading from the GIC CPU
+ *    Interface's Interrupt Acknowledge Register (\p GICC_IAR).
+ *
+ *
+ * @retval irq
+ *    An interrupt line number for the virtual GIC.
+ * @retval source
+ *    The ID of the originating vCPU of a Software-Generated Interrupt.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_interrupt_ack_return
+_okl4_sys_interrupt_ack(void)
+{
+    struct _okl4_sys_interrupt_ack_return result;
+
+    register uint32_t r0 asm("r0");
+    register uint32_t r1 asm("r1");
+    __asm__ __volatile__(
+            ""hvc(5128)"\n\t"
+            : "=r"(r0), "=r"(r1)
+            :
+            : "cc", "memory", "r2", "r3", "r4", "r5"
+            );
+
+
+    result.irq = (okl4_interrupt_number_t)(r0);
+    result.source = (uint8_t)(r1);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_interrupt_ack_return
+_okl4_sys_interrupt_ack(void)
+{
+    struct _okl4_sys_interrupt_ack_return result;
+
+    register okl4_register_t x0 asm("x0");
+    register okl4_register_t x1 asm("x1");
+    __asm__ __volatile__(
+            "" hvc(5128) "\n\t"
+            : "=r"(x0), "=r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.irq = (okl4_interrupt_number_t)(x0);
+    result.source = (uint8_t)(x1);
+    return result;
+}
+
+#endif
+
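+/*
+ * Illustrative sketch (not part of the generated API): a minimal
+ * acknowledge/handle/end-of-interrupt loop built from the call above and
+ * from _okl4_sys_interrupt_eoi() declared further below.  The handler
+ * function name is an assumption for the example only.
+ *
+ *   for (;;) {
+ *       struct _okl4_sys_interrupt_ack_return ack = _okl4_sys_interrupt_ack();
+ *
+ *       if (ack.irq == 1023U) {
+ *           break;  // spurious: no deliverable interrupt is pending
+ *       }
+ *       handle_irq(ack.irq, ack.source);  // hypothetical handler
+ *       // EOIs must be issued in reverse order of acknowledgement
+ *       (void)_okl4_sys_interrupt_eoi(ack.irq, ack.source);
+ *   }
+ */
+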
+/**
+ *
+ * @brief Register a vCPU as the handler of an interrupt.
+ *
+ *    @details
+ *    The Microvisor virtual GIC API permits an interrupt source to be
+ *        dynamically
+ *    assigned to a specific IRQ number in a Cell or vCPU. An interrupt can
+ *        only
+ *    be assigned to one IRQ number, and one Cell or vCPU, at a time. This
+ *    operation attaches the interrupt to a vCPU as a private interrupt.
+ *
+ *    Interrupt sources are addressed using capabilities. This operation,
+ *        given
+ *    a capability for an interrupt that is not currently attached to any
+ *        handler,
+ *    can attach the interrupt at a given unused IRQ number. If the IRQ
+ *        number
+ *    is between 16 and 31 (the GIC Private Peripheral Interrupt range), it
+ *        will
+ *    be attached to the specified vCPU; if it is between 32 and 1019 (the
+ *        GIC
+ *    Shared Peripheral Interrupt range), it will return an error.
+ *
+ *    @note The Software Generated Interrupt range, from 0 to 15, is
+ *        reserved
+ *    and cannot be used to attach interrupt source capabilities.
+ *
+ *    @note In most cases, interrupt sources are attached at system
+ *        construction
+ *    time by the OK Tool. It is not normally necessary to attach an
+ *        interrupt
+ *    source before using it.
+ *
+ * @param vcpu_cap
+ *    A virtual CPU capability.
+ * @param irq_cap
+ *    A virtual interrupt capability.
+ * @param irq_num
+ *    An interrupt line number for the virtual GIC.
+ *
+ * @retval error
+ *    The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_attach_private(okl4_kcap_t vcpu_cap, okl4_kcap_t irq_cap,
+        okl4_interrupt_number_t irq_num)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)vcpu_cap;
+    register uint32_t r1 asm("r1") = (uint32_t)irq_cap;
+    register uint32_t r2 asm("r2") = (uint32_t)irq_num;
+    __asm__ __volatile__(
+            ""hvc(5134)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_attach_private(okl4_kcap_t vcpu_cap, okl4_kcap_t irq_cap,
+        okl4_interrupt_number_t irq_num)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)vcpu_cap;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)irq_cap;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)irq_num;
+    __asm__ __volatile__(
+            "" hvc(5134) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Register a Cell (domain) as the handler of an interrupt.
+ *
+ *    @details
+ *    The Microvisor virtual GIC API permits an interrupt source to be
+ *        dynamically
+ *    assigned to a specific IRQ number in a Cell or vCPU. An interrupt can
+ *        only
+ *    be assigned to one IRQ number, and one Cell or vCPU, at a time. This
+ *    operation attaches the interrupt to a Cell as a shared interrupt.
+ *
+ *    Interrupt sources are addressed using capabilities. This operation,
+ *        given
+ *    a capability for an interrupt that is not currently attached to any
+ *        handler,
+ *    can attach the interrupt at a given unused IRQ number. If the IRQ
+ *        number
+ *    is between 0 and 31 (the GIC SGI or Private Peripheral Interrupt
+ *        range), it
+ *    will return an error; if it is between 32 and 1019 (the GIC
+ *    Shared Peripheral Interrupt range), it will be attached to the
+ *        specified
+ *    Cell.
+ *
+ *    @note In most cases, interrupt sources are attached at system
+ *        construction
+ *    time by the OK Tool. It is not normally necessary to attach an
+ *        interrupt
+ *    source before using it.
+ *
+ * @param domain_cap
+ *    A domain capability.
+ * @param irq_cap
+ *    A virtual interrupt capability.
+ * @param irq_num
+ *    An interrupt line number for the virtual GIC.
+ *
+ * @retval error
+ *    The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_attach_shared(okl4_kcap_t domain_cap, okl4_kcap_t irq_cap,
+        okl4_interrupt_number_t irq_num)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)domain_cap;
+    register uint32_t r1 asm("r1") = (uint32_t)irq_cap;
+    register uint32_t r2 asm("r2") = (uint32_t)irq_num;
+    __asm__ __volatile__(
+            ""hvc(5135)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_attach_shared(okl4_kcap_t domain_cap, okl4_kcap_t irq_cap,
+        okl4_interrupt_number_t irq_num)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)domain_cap;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)irq_cap;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)irq_num;
+    __asm__ __volatile__(
+            "" hvc(5135) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
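+/*
+ * Illustrative sketch (not part of the generated API): attaching an
+ * interrupt source to a Cell as a shared (SPI-range) interrupt and then
+ * unmasking it.  The capability names and the SPI number 42 are assumptions
+ * for the example; OKL4_ERROR_OK is assumed to be the success value.
+ *
+ *   okl4_error_t err;
+ *
+ *   err = _okl4_sys_interrupt_attach_shared(my_domain_cap, my_irq_cap, 42U);
+ *   if (err == OKL4_ERROR_OK) {
+ *       // SPI numbers must be in the 32..1019 range; 0..31 would fail
+ *       err = _okl4_sys_interrupt_unmask(42U);
+ *   }
+ */
+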
+/**
+ *
+ * @brief Unregister an interrupt.
+ *
+ *    @details
+ *    Detach the given interrupt source from its registered handler. The
+ *        interrupt
+ *    will be deactivated and disabled, and will not be delivered again
+ *        until it
+ *    is reattached. However, if it is configured in edge triggering mode,
+ *        its
+ *    pending state will be preserved.
+ *
+ * @param irq_cap
+ *    A virtual interrupt capability.
+ *
+ * @retval error
+ *    The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_detach(okl4_kcap_t irq_cap)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)irq_cap;
+    __asm__ __volatile__(
+            ""hvc(5136)"\n\t"
+            : "+r"(r0)
+            :
+            : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_detach(okl4_kcap_t irq_cap)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)irq_cap;
+    __asm__ __volatile__(
+            "" hvc(5136) "\n\t"
+            : "+r"(x0)
+            :
+            : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Enable the interrupt distributor.
+ *
+ *    @details
+ *    This API enables the interrupt distributor, in the same form as
+ *        writing to
+ *    the enable bit in (\p GICD_CTLR).
+ *
+ * @param enable
+ *    A boolean value for GIC distributor enable.
+ *
+ * @retval error
+ *    The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_dist_enable(okl4_bool_t enable)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)enable;
+    __asm__ __volatile__(
+            ""hvc(5133)"\n\t"
+            : "+r"(r0)
+            :
+            : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_dist_enable(okl4_bool_t enable)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)enable;
+    __asm__ __volatile__(
+            "" hvc(5133) "\n\t"
+            : "+r"(x0)
+            :
+            : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Signal the end of the interrupt handling routine.
+ *
+ *    @details
+ *    This API informs the virtual GIC that handling for a given interrupt
+ *        has
+ *    completed. It marks the interrupt as inactive, and decreases the
+ *        running
+ *    interrupt priority of the calling vCPU. This may cause immediate
+ *        delivery of
+ *    another interrupt, possibly with the same number, if one is enabled
+ *        and
+ *    pending.
+ *
+ *    The specified interrupt number and source must match the active
+ *        interrupt
+ *    that was most recently returned by an @ref okl4_sys_interrupt_ack
+ *    invocation. If multiple interrupts have been acknowledged and not yet
+ *        ended,
+ *    they must be ended in the reverse order of their acknowledgement.
+ *
+ *    @note Invoking this API is equivalent to writing to the GIC CPU
+ *    Interface's End of Interrupt Register (\p GICC_EOIR), with \p EOImode
+ *    set to 0 in \p GICC_CTLR.
+ *
+ * @param irq
+ *    An interrupt line number for the virtual GIC.
+ * @param source
+ *    The ID of the originating vCPU of a Software-Generated Interrupt.
+ *
+ * @retval error
+ *    The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_eoi(okl4_interrupt_number_t irq, uint8_t source)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)irq;
+    register uint32_t r1 asm("r1") = (uint32_t)source;
+    __asm__ __volatile__(
+            ""hvc(5129)"\n\t"
+            : "+r"(r0), "+r"(r1)
+            :
+            : "cc", "memory", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_eoi(okl4_interrupt_number_t irq, uint8_t source)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)irq;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)source;
+    __asm__ __volatile__(
+            "" hvc(5129) "\n\t"
+            : "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Retrieve the highest-priority pending interrupt.
+ *
+ *    @details
+ *    This API returns the number and source of the highest-priority
+ *        enabled,
+ *    pending and inactive interrupt that is targeted at the calling vCPU
+ *    and has higher priority than the calling vCPU's running group
+ *        priority.
+ *
+ *    If no such interrupt exists, interrupt number 1023 is returned. If
+ *        the
+ *    returned interrupt number is 16 or greater, the source ID is 0;
+ *        otherwise it
+ *    is the vCPU ID of the vCPU that raised the interrupt (which is always
+ *        in the
+ *    same Cell as the caller).
+ *
+ *    @note Invoking this API is equivalent to reading from the GIC CPU
+ *    Interface's Highest Priority Pending Interrupt Register (\p
+ *        GICC_HPPIR).
+ *
+ *
+ * @retval irq
+ *    An interrupt line number for the virtual GIC.
+ * @retval source
+ *    The ID of the originating vCPU of a Software-Generated Interrupt.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_interrupt_get_highest_priority_pending_return
+_okl4_sys_interrupt_get_highest_priority_pending(void)
+{
+    struct _okl4_sys_interrupt_get_highest_priority_pending_return result;
+
+    register uint32_t r0 asm("r0");
+    register uint32_t r1 asm("r1");
+    __asm__ __volatile__(
+            ""hvc(5137)"\n\t"
+            : "=r"(r0), "=r"(r1)
+            :
+            : "cc", "memory", "r2", "r3", "r4", "r5"
+            );
+
+
+    result.irq = (okl4_interrupt_number_t)(r0);
+    result.source = (uint8_t)(r1);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_interrupt_get_highest_priority_pending_return
+_okl4_sys_interrupt_get_highest_priority_pending(void)
+{
+    struct _okl4_sys_interrupt_get_highest_priority_pending_return result;
+
+    register okl4_register_t x0 asm("x0");
+    register okl4_register_t x1 asm("x1");
+    __asm__ __volatile__(
+            "" hvc(5137) "\n\t"
+            : "=r"(x0), "=r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.irq = (okl4_interrupt_number_t)(x0);
+    result.source = (uint8_t)(x1);
+    return result;
+}
+
+#endif
+
+/**
+ *
+ * @brief Fetch the payload flags of a virtual interrupt.
+ *
+ *    @details
+ *    This fetches and clears the accumulated payload flags for a virtual
+ *    interrupt that has been raised by the Microvisor, or by a vCPU
+ *        invoking
+ *    the @ref okl4_sys_vinterrupt_raise API.
+ *
+ *    If the virtual interrupt is configured for level triggering, clearing
+ *        the
+ *    accumulated flags by calling this function will also clear the
+ *        pending state
+ *    of the interrupt.
+ *
+ * @param irq
+ *    An interrupt line number for the virtual GIC.
+ *
+ * @retval error
+ *    The resulting error value.
+ * @retval payload
+ *    Accumulated virtual interrupt payload flags.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_interrupt_get_payload_return
+_okl4_sys_interrupt_get_payload(okl4_interrupt_number_t irq)
+{
+    typedef union {
+        struct uint64 {
+            uint32_t lo;
+            uint32_t hi;
+        } words;
+        uint64_t val;
+    } okl4_uint64_tmp;
+    okl4_uint64_tmp payload_tmp;
+    struct _okl4_sys_interrupt_get_payload_return result;
+
+    register uint32_t r0 asm("r0") = (uint32_t)irq;
+    register uint32_t r1 asm("r1");
+    register uint32_t r2 asm("r2");
+    __asm__ __volatile__(
+            ""hvc(5132)"\n\t"
+            : "=r"(r1), "=r"(r2), "+r"(r0)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    result.error = (okl4_error_t)(r0);
+    payload_tmp.words.lo = r1;
+    payload_tmp.words.hi = r2;
+    result.payload = (okl4_virq_flags_t)(payload_tmp.val);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_interrupt_get_payload_return
+_okl4_sys_interrupt_get_payload(okl4_interrupt_number_t irq)
+{
+    struct _okl4_sys_interrupt_get_payload_return result;
+
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)irq;
+    register okl4_register_t x1 asm("x1");
+    __asm__ __volatile__(
+            "" hvc(5132) "\n\t"
+            : "=r"(x1), "+r"(x0)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.error = (okl4_error_t)(x0);
+    result.payload = (okl4_virq_flags_t)(x1);
+    return result;
+}
+
+#endif
+
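+/*
+ * Illustrative sketch (not part of the generated API): reading and clearing
+ * the accumulated payload flags of a virtual interrupt.  The IRQ number and
+ * the meaning of individual flag bits are assumptions for the example;
+ * OKL4_ERROR_OK is assumed to be the success value.
+ *
+ *   struct _okl4_sys_interrupt_get_payload_return p =
+ *           _okl4_sys_interrupt_get_payload(virq_number);
+ *
+ *   if (p.error == OKL4_ERROR_OK) {
+ *       okl4_virq_flags_t flags = p.payload;  // accumulated flags, now cleared
+ *       if (flags & 1U) {
+ *           // bit 0 was raised by the sender at least once since the last read
+ *       }
+ *   }
+ */
+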
+/**
+ *
+ * @brief Query the number of supported CPUs and interrupt lines.
+ *
+ *    @details
+ *    This API returns the number of CPUs and interrupt lines supported by
+ *        the
+ *    virtual interrupt controller, in the same form as is found in the GIC
+ *    Distributor's Interrupt Controller Type Register (\p GICD_TYPER), in
+ *    the \p CPUNumber and \p ITLinesNumber fields.
+ *
+ *
+ * @retval cpunumber
+ *    The number of supported target CPUs, minus 1.
+ * @retval itnumber
+ *    The number of supported groups of 32 interrupt lines, minus 1.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_interrupt_limits_return
+_okl4_sys_interrupt_limits(void)
+{
+    struct _okl4_sys_interrupt_limits_return result;
+
+    register uint32_t r0 asm("r0");
+    register uint32_t r1 asm("r1");
+    __asm__ __volatile__(
+            ""hvc(5138)"\n\t"
+            : "=r"(r0), "=r"(r1)
+            :
+            : "cc", "memory", "r2", "r3", "r4", "r5"
+            );
+
+
+    result.cpunumber = (okl4_count_t)(r0);
+    result.itnumber = (okl4_count_t)(r1);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_interrupt_limits_return
+_okl4_sys_interrupt_limits(void)
+{
+    struct _okl4_sys_interrupt_limits_return result;
+
+    register okl4_register_t x0 asm("x0");
+    register okl4_register_t x1 asm("x1");
+    __asm__ __volatile__(
+            "" hvc(5138) "\n\t"
+            : "=r"(x0), "=r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.cpunumber = (okl4_count_t)(x0);
+    result.itnumber = (okl4_count_t)(x1);
+    return result;
+}
+
+#endif
+
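+/*
+ * Illustrative sketch (not part of the generated API): converting the
+ * GICD_TYPER-style values returned above into absolute counts.  Both fields
+ * are encoded "minus 1", and interrupt lines are reported in groups of 32,
+ * as described in the documentation block.
+ *
+ *   struct _okl4_sys_interrupt_limits_return lim = _okl4_sys_interrupt_limits();
+ *
+ *   okl4_count_t num_cpus = lim.cpunumber + 1U;
+ *   okl4_count_t num_irq_lines = 32U * (lim.itnumber + 1U);
+ */
+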
+/**
+ *
+ * @brief Disable delivery of an interrupt.
+ *
+ *    @details
+ *    This prevents future delivery of the specified interrupt. It does not
+ *    affect any currently active delivery (that is, end-of-interrupt must
+ *    still be called). It also does not affect the pending state, so it
+ *        cannot
+ *    cause loss of edge-triggered interrupts.
+ *
+ *    @note Invoking this API is equivalent to writing a single bit to one
+ *        of the
+ *    GIC Distributor's Interrupt Clear-Enable Registers (\p
+ *        GICD_ICENABLERn).
+ *
+ * @param irq
+ *    An interrupt line number for the virtual GIC.
+ *
+ * @retval error
+ *    The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_mask(okl4_interrupt_number_t irq)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)irq;
+    __asm__ __volatile__(
+            ""hvc(5130)"\n\t"
+            : "+r"(r0)
+            :
+            : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_mask(okl4_interrupt_number_t irq)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)irq;
+    __asm__ __volatile__(
+            "" hvc(5130) "\n\t"
+            : "+r"(x0)
+            :
+            : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Raise a Software-Generated Interrupt.
+ *
+ *    @details
+ *    This allows a Software-Generated Interrupt (with interrupt number
+ *        between
+ *    0 and 15) to be raised, targeted at a specified set of vCPUs within
+ *        the
+ *    same Cell. No capability is required, but interrupts cannot be raised
+ *        to
+ *    other Cells with this API.
+ *
+ *    @note Invoking this API is equivalent to writing to the GIC
+ *        Distributor's
+ *    Software Generated Interrupt Register (\p GICD_SGIR).
+ *
+ *    @note This API is distinct from the @ref okl4_sys_vinterrupt_raise
+ *        API,
+ *    which raises a virtual interrupt source which may communicate across
+ *    Cell boundaries, and requires an explicit capability.
+ *
+ * @param sgir
+ *    A description of the Software-Generated Interrupt to raise.
+ *
+ * @retval error
+ *    The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_raise(okl4_gicd_sgir_t sgir)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)sgir;
+    __asm__ __volatile__(
+            ""hvc(5145)"\n\t"
+            : "+r"(r0)
+            :
+            : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_raise(okl4_gicd_sgir_t sgir)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)sgir;
+    __asm__ __volatile__(
+            "" hvc(5145) "\n\t"
+            : "+r"(x0)
+            :
+            : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
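+/*
+ * Illustrative sketch (not part of the generated API): raising SGI 3 to
+ * vCPUs 0 and 1 of the caller's Cell.  This assumes okl4_gicd_sgir_t is a
+ * plain 32-bit value laid out like the hardware GICD_SGIR register (SGI
+ * number in bits [3:0], CPU target list in bits [23:16]); production code
+ * should construct the value with the SDK's accessor helpers instead.
+ *
+ *   okl4_gicd_sgir_t sgir = (okl4_gicd_sgir_t)((0x3U << 16) | 3U);
+ *
+ *   (void)_okl4_sys_interrupt_raise(sgir);
+ */
+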
+/**
+ *
+ * @brief Set the interrupt priority binary point for the calling vCPU.
+ *
+ *    @details
+ *    The GIC splits IRQ priority values into two subfields: the group
+ *        priority
+ *    and the subpriority. The binary point is the index of the most
+ *        significant
+ *    bit of the subpriority (that is, one less than the number of
+ *        subpriority
+ *    bits).
+ *
+ *    An interrupt can preempt another active interrupt only if its group
+ *        priority
+ *    is higher than the running group priority; the subpriority is ignored
+ *        for
+ *    this comparison. The subpriority is used to determine which of two
+ *        equal
+ *    priority interrupts will be delivered first.
+ *
+ *    @note Invoking this API is equivalent to writing to the GIC CPU
+ *    Interface's Binary Point Register (\p GICC_BPR).
+ *
+ * @param binary_point
+ *    The number of bits in the subpriority field, minus 1.
+ *
+ * @retval error
+ *    The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_set_binary_point(uint8_t binary_point)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)binary_point;
+    __asm__ __volatile__(
+            ""hvc(5139)"\n\t"
+            : "+r"(r0)
+            :
+            : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_set_binary_point(uint8_t binary_point)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)binary_point;
+    __asm__ __volatile__(
+            "" hvc(5139) "\n\t"
+            : "+r"(x0)
+            :
+            : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
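+/*
+ * Illustrative sketch (not part of the generated API): with a binary point
+ * of 3, the subpriority occupies bits [3:0] and the group priority bits
+ * [7:4] of each priority byte, so two interrupts that differ only in their
+ * low nibble cannot preempt one another.
+ *
+ *   (void)_okl4_sys_interrupt_set_binary_point(3U);
+ *
+ *   // 0x40 and 0x4f now share group priority 0x4; 0x30 can preempt both.
+ */
+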
+/**
+ *
+ * @brief Change the configuration of an interrupt.
+ *
+ *    @details
+ *    This sets the triggering type of a specified interrupt to either
+ *    edge or level triggering.
+ *
+ *    The specified interrupt must be disabled.
+ *
+ *    @note Some interrupt sources only support one triggering type. In
+ *        this case,
+ *    calling this API for the interrupt will have no effect.
+ *
+ *    @note Invoking this API is equivalent to writing a single two-bit
+ *        field of
+ *    one of the GIC Distributor's Interrupt Configuration Registers (\p
+ *    GICD_ICFGRn).
+ *
+ * @param irq
+ *    An interrupt line number for the virtual GIC.
+ * @param icfgr
+ *    The configuration bits for the interrupt line.
+ *
+ * @retval error
+ *    The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_set_config(okl4_interrupt_number_t irq,
+        okl4_gicd_icfgr_t icfgr)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)irq;
+    register uint32_t r1 asm("r1") = (uint32_t)icfgr;
+    __asm__ __volatile__(
+            ""hvc(5140)"\n\t"
+            : "+r"(r0), "+r"(r1)
+            :
+            : "cc", "memory", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_set_config(okl4_interrupt_number_t irq,
+        okl4_gicd_icfgr_t icfgr)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)irq;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)icfgr;
+    __asm__ __volatile__(
+            "" hvc(5140) "\n\t"
+            : "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Enable or disable the signaling of interrupts to the vCPU.
+ *
+ *    @details
+ *    Enable or disable the signaling of interrupts by the virtual CPU
+ *        interface
+ *    to the connected vCPU.
+ *
+ *    @note Interrupt signaling is initially disabled, as required by the
+ *        GIC
+ *    API specification. This API must therefore be invoked at least once
+ *        before
+ *    any interrupts will be delivered.
+ *
+ *    @note Invoking this API is equivalent to writing to the GIC CPU
+ *    Interface's Control Register (\p GICC_CTLR) using the "GICv1 without
+ *    Security Extensions or Non-Secure" format, which contains only a
+ *        single
+ *    enable bit.
+ *
+ * @param enable
+ *    A boolean value for GIC distributor enable.
+ *
+ * @retval error
+ *    The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_set_control(okl4_bool_t enable)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)enable;
+    __asm__ __volatile__(
+            ""hvc(5141)"\n\t"
+            : "+r"(r0)
+            :
+            : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_set_control(okl4_bool_t enable)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)enable;
+    __asm__ __volatile__(
+            "" hvc(5141) "\n\t"
+            : "+r"(x0)
+            :
+            : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Change the delivery priority of an interrupt.
+ *
+ *    @details
+ *    This changes the delivery priority of an interrupt. It has no
+ *        immediate
+ *    effect on currently active interrupts, but will take effect once the
+ *    interrupt is deactivated.
+ *
+ *    @note The number of significant bits in this value is
+ *    implementation-defined. In this configuration, 4 significant priority
+ *    bits are implemented. The most significant bit is always at the high
+ *        end
+ *    of the priority byte; that is, at bit 7.
+ *
+ *    @note Smaller values represent higher priority. The highest possible
+ *    priority is 0; the lowest possible priority has all implemented bits
+ *        set,
+ *    and in this implementation is currently 0xf0.
+ *
+ *    @note Invoking this API is equivalent to writing a single byte of one
+ *        of the
+ *    GIC Distributor's Interrupt Priority Registers (\p GICD_IPRIORITYn).
+ *
+ * @param irq
+ *    An interrupt line number for the virtual GIC.
+ * @param priority
+ *    A GIC priority value in the range 0-240.
+ *
+ * @retval error
+ *    The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_set_priority(okl4_interrupt_number_t irq, uint8_t priority)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)irq;
+    register uint32_t r1 asm("r1") = (uint32_t)priority;
+    __asm__ __volatile__(
+            ""hvc(5142)"\n\t"
+            : "+r"(r0), "+r"(r1)
+            :
+            : "cc", "memory", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_set_priority(okl4_interrupt_number_t irq, uint8_t priority)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)irq;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)priority;
+    __asm__ __volatile__(
+            "" hvc(5142) "\n\t"
+            : "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
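+/*
+ * Illustrative sketch (not part of the generated API): with 4 implemented
+ * priority bits at the top of the byte, usable priority values step in
+ * increments of 0x10, from 0x00 (highest) down to 0xf0 (lowest).  The IRQ
+ * numbers are assumptions for the example.
+ *
+ *   (void)_okl4_sys_interrupt_set_priority(42U, 0x20U);  // more urgent
+ *   (void)_okl4_sys_interrupt_set_priority(43U, 0x80U);  // less urgent
+ */
+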
+/**
+ *
+ * @brief Set the minimum interrupt priority of the calling vCPU.
+ *
+ *    @details
+ *    This API sets the calling vCPU's minimum running interrupt priority.
+ *    Interrupts will only be delivered if they have priority higher than
+ *        this
+ *    value.
+ *
+ *    @note Higher priority corresponds to a lower priority value; i.e.,
+ *        the
+ *    highest priority value is 0.
+ *
+ *    @note The priority mask is initially set to 0, which prevents all
+ *        interrupt
+ *    delivery, as required by the GIC API specification. This API must
+ *        therefore
+ *    be invoked at least once before any interrupts will be delivered.
+ *
+ *    @note Invoking this API is equivalent to writing to the GIC CPU
+ *    Interface's Interrupt Priority Mask Register (\p GICC_PMR).
+ *
+ * @param priority_mask
+ *    A GIC priority value in the range 0-240.
+ *
+ * @retval error
+ *    The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_set_priority_mask(uint8_t priority_mask)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)priority_mask;
+    __asm__ __volatile__(
+            ""hvc(5143)"\n\t"
+            : "+r"(r0)
+            :
+            : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_set_priority_mask(uint8_t priority_mask)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)priority_mask;
+    __asm__ __volatile__(
+            "" hvc(5143) "\n\t"
+            : "+r"(x0)
+            :
+            : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
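+/*
+ * Illustrative sketch (not part of the generated API): a minimal per-vCPU
+ * virtual GIC bring-up using the calls above.  Interrupt signaling starts
+ * disabled and the priority mask starts at 0, so both must be programmed
+ * before any interrupt can be delivered; the distributor enable is shown
+ * for completeness.
+ *
+ *   (void)_okl4_sys_interrupt_dist_enable((okl4_bool_t)1);      // distributor on
+ *   (void)_okl4_sys_interrupt_set_control((okl4_bool_t)1);      // CPU interface on
+ *   (void)_okl4_sys_interrupt_set_priority_mask(0xf0U);         // admit everything of
+ *                                                               // higher priority than 0xf0
+ */
+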
+/**
+ *
+ * @brief Change the delivery targets of a shared interrupt.
+ *
+ *    @details
+ *    This sets the subset of a Cell's vCPUs to which the specified shared
+ *    interrupt (with an interrupt number between 32 and 1019) can be
+ *        delivered.
+ *    The target vCPUs are specified by an 8-bit bitfield. Note that no
+ *        more
+ *    than 8 targets are supported by the GIC API, so vCPUs with IDs of 8 or
+ *    higher will never receive shared interrupts.
+ *
+ *    @note The GIC API does not specify how or when the implementation
+ *        selects a
+ *    target for interrupt delivery. Most hardware implementations deliver
+ *        to
+ *    all possible targets simultaneously, and then cancel all but the
+ *        first to
+ *    be acknowledged. In the interests of efficiency, the OKL4 Microvisor
+ *        does
+ *    not implement this behaviour; instead, it chooses an arbitrary target
+ *        when
+ *    the interrupt first becomes deliverable.
+ *
+ *    @note Invoking this API is equivalent to writing a single byte of one
+ *        of the
+ *    GIC Distributor's Interrupt Targets Registers (\p GICD_ITARGETSRn).
+ *
+ * @param irq
+ *    An interrupt line number for the virtual GIC.
+ * @param cpu_mask
+ *    Bitmask of vCPU IDs.
+ *
+ * @retval error
+ *    The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_set_targets(okl4_interrupt_number_t irq, uint8_t cpu_mask)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)irq;
+    register uint32_t r1 asm("r1") = (uint32_t)cpu_mask;
+    __asm__ __volatile__(
+            ""hvc(5144)"\n\t"
+            : "+r"(r0), "+r"(r1)
+            :
+            : "cc", "memory", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_set_targets(okl4_interrupt_number_t irq, uint8_t cpu_mask)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)irq;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)cpu_mask;
+    __asm__ __volatile__(
+            "" hvc(5144) "\n\t"
+            : "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
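+/*
+ * Illustrative sketch (not part of the generated API): routing shared
+ * interrupt 42 to vCPUs 0 and 2 of the handling Cell.  The IRQ number is an
+ * assumption for the example; the mask is one bit per vCPU ID, so only the
+ * first 8 vCPUs can be targeted.
+ *
+ *   uint8_t mask = (uint8_t)((1U << 0) | (1U << 2));   // vCPU 0 and vCPU 2
+ *
+ *   (void)_okl4_sys_interrupt_set_targets(42U, mask);
+ */
+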
+/**
+ *
+ * @brief Enable delivery of an interrupt.
+ *
+ *    @details
+ *    This permits delivery of the specified interrupt, once it is pending
+ *        and
+ *    inactive and has sufficiently high priority.
+ *
+ *    @note Invoking this API is equivalent to writing a single bit to one
+ *        of the
+ *    GIC Distributor's Interrupt Set-Enable Registers (\p
+ *        GICD_ISENABLERn).
+ *
+ * @param irq
+ *    An interrupt line number for the virtual GIC.
+ *
+ * @retval error
+ *    The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_unmask(okl4_interrupt_number_t irq)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)irq;
+    __asm__ __volatile__(
+            ""hvc(5131)"\n\t"
+            : "+r"(r0)
+            :
+            : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_unmask(okl4_interrupt_number_t irq)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)irq;
+    __asm__ __volatile__(
+            "" hvc(5131) "\n\t"
+            : "+r"(x0)
+            :
+            : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Enter the kernel interactive debugger.
+ *
+ * @details
+ * This is available on a debug build of the kernel; otherwise the operation
+ * is a no-op.
+ *
+ *
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE void
+_okl4_sys_kdb_interact(void)
+{
+    __asm__ __volatile__(
+            ""hvc(5120)"\n\t"
+            :
+            :
+            : "cc", "memory", "r0", "r1", "r2", "r3", "r4", "r5"
+            );
+
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE void
+_okl4_sys_kdb_interact(void)
+{
+    __asm__ __volatile__(
+            "" hvc(5120) "\n\t"
+            :
+            :
+            : "cc", "memory", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+}
+
+#endif
+
+/**
+ *
+ * @brief Set the debug name of the addressed kernel object.
+ *
+ *    @details
+ *    The debug version of the Microvisor kernel supports naming of kernel
+ *        objects
+ *    to aid debugging. The object names are visible to external debuggers
+ *        such
+ *    as a JTAG tool, as well as the in-built interactive kernel debugger.
+ *
+ *    The target object may be any Microvisor object for which the caller
+ *        has a
+ *    capability with the master rights.
+ *
+ *    Debug names may be up to 16 characters long, with four characters
+ *        stored per
+ *    \p name[x] argument in little-endian order (on a 32-bit machine).
+ *
+ * @param object
+ *    The target kernel object id.
+ * @param name0
+ * @param name1
+ * @param name2
+ * @param name3
+ *
+ * @retval error
+ *    Resulting error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_kdb_set_object_name(okl4_kcap_t object, uint32_t name0, uint32_t name1
+        , uint32_t name2, uint32_t name3)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)object;
+    register uint32_t r1 asm("r1") = (uint32_t)name0;
+    register uint32_t r2 asm("r2") = (uint32_t)name1;
+    register uint32_t r3 asm("r3") = (uint32_t)name2;
+    register uint32_t r4 asm("r4") = (uint32_t)name3;
+    __asm__ __volatile__(
+            ""hvc(5121)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4)
+            :
+            : "cc", "memory", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_kdb_set_object_name(okl4_kcap_t object, uint32_t name0, uint32_t name1
+        , uint32_t name2, uint32_t name3)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)object;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)name0;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)name1;
+    register okl4_register_t x3 asm("x3") = (okl4_register_t)name2;
+    register okl4_register_t x4 asm("x4") = (okl4_register_t)name3;
+    __asm__ __volatile__(
+            "" hvc(5121) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4)
+            :
+            : "cc", "memory", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
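+/*
+ * Illustrative sketch (not part of the generated API): packing a debug name
+ * into the four little-endian 32-bit words expected above.  The helper and
+ * the capability name are assumptions for the example.
+ *
+ *   static void set_name(okl4_kcap_t cap, const char name[16])
+ *   {
+ *       uint32_t w[4] = { 0, 0, 0, 0 };
+ *       unsigned int i;
+ *
+ *       for (i = 0; i < 16 && name[i] != '\0'; i++) {
+ *           w[i / 4] |= (uint32_t)(unsigned char)name[i] << (8 * (i % 4));
+ *       }
+ *       (void)_okl4_sys_kdb_set_object_name(cap, w[0], w[1], w[2], w[3]);
+ *   }
+ */
+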
+/**
+ *
+ * @brief Call a kernel support package (KSP) defined interface.
+ *
+ *    @details
+ *    The KSP procedure call allows the caller to interact with customer
+ *    specific functions provided by the kernel support package. The caller
+ *    must possess a capability with the appropriate rights to a KSP agent
+ *        in
+ *    order to call this interface.
+ *
+ *    The remaining parameters provided are passed directly to the KSP
+ *        without
+ *    any inspection.
+ *
+ *    The KSP can return an error code and up to three return words.
+ *
+ * @param agent
+ *    The target KSP agent
+ * @param operation
+ *    The operation to be performed
+ * @param arg0
+ *    An argument for the operation
+ * @param arg1
+ *    An argument for the operation
+ * @param arg2
+ *    An argument for the operation
+ * @param arg3
+ *    An argument for the operation
+ *
+ * @retval error
+ *    The resulting error
+ * @retval ret0
+ *    A return value for the operation
+ * @retval ret1
+ *    A return value for the operation
+ * @retval ret2
+ *    A return value for the operation
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_ksp_procedure_call_return
+_okl4_sys_ksp_procedure_call(okl4_kcap_t agent, okl4_ksp_arg_t operation,
+        okl4_ksp_arg_t arg0, okl4_ksp_arg_t arg1, okl4_ksp_arg_t arg2,
+        okl4_ksp_arg_t arg3)
+{
+    struct _okl4_sys_ksp_procedure_call_return result;
+
+    register uint32_t r0 asm("r0") = (uint32_t)agent;
+    register uint32_t r1 asm("r1") = (uint32_t)operation;
+    register uint32_t r2 asm("r2") = (uint32_t)arg0;
+    register uint32_t r3 asm("r3") = (uint32_t)arg1;
+    register uint32_t r4 asm("r4") = (uint32_t)arg2;
+    register uint32_t r5 asm("r5") = (uint32_t)arg3;
+    __asm__ __volatile__(
+            ""hvc(5197)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4), "+r"(r5)
+            :
+            : "cc", "memory"
+            );
+
+
+    result.error = (okl4_error_t)(r0);
+    result.ret0 = (okl4_ksp_arg_t)(r1);
+    result.ret1 = (okl4_ksp_arg_t)(r2);
+    result.ret2 = (okl4_ksp_arg_t)(r3);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_ksp_procedure_call_return
+_okl4_sys_ksp_procedure_call(okl4_kcap_t agent, okl4_ksp_arg_t operation,
+        okl4_ksp_arg_t arg0, okl4_ksp_arg_t arg1, okl4_ksp_arg_t arg2,
+        okl4_ksp_arg_t arg3)
+{
+    struct _okl4_sys_ksp_procedure_call_return result;
+
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)agent;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)operation;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)arg0;
+    register okl4_register_t x3 asm("x3") = (okl4_register_t)arg1;
+    register okl4_register_t x4 asm("x4") = (okl4_register_t)arg2;
+    register okl4_register_t x5 asm("x5") = (okl4_register_t)arg3;
+    __asm__ __volatile__(
+            "" hvc(5197) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4), "+r"(x5)
+            :
+            : "cc", "memory", "x6", "x7"
+            );
+
+
+    result.error = (okl4_error_t)(x0);
+    result.ret0 = (okl4_ksp_arg_t)(x1);
+    result.ret1 = (okl4_ksp_arg_t)(x2);
+    result.ret2 = (okl4_ksp_arg_t)(x3);
+    return result;
+}
+
+#endif
+
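+/*
+ * Illustrative sketch (not part of the generated API): invoking a KSP agent
+ * and unpacking the three return words.  The agent capability, operation
+ * code and argument values are assumptions for the example; OKL4_ERROR_OK
+ * is assumed to be the success value.
+ *
+ *   struct _okl4_sys_ksp_procedure_call_return r =
+ *           _okl4_sys_ksp_procedure_call(ksp_agent_cap, MY_KSP_OP,
+ *                                        arg0, arg1, 0, 0);
+ *
+ *   if (r.error == OKL4_ERROR_OK) {
+ *       // r.ret0, r.ret1 and r.ret2 carry the agent-defined results
+ *   }
+ */
+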
+/**
+ *
+ * @brief Attach a segment to an MMU.
+ *
+ *    @details
+ *    Before any mappings based on a segment can be established in the
+ *        MMU's
+ *    address space, the segment must be attached to the MMU. Attaching a
+ *        segment
+ *    serves to reference count the segment, preventing modifications to
+ *        the
+ *    segment being made.
+ *
+ *    A segment may be attached to an MMU multiple times, at the same or
+ *    different index. Each time a segment is attached to an MMU, the
+ *        attachment
+ *    reference count is incremented.
+ *
+ *    Attaching segments to an MMU is also important for VMMU objects in
+ *        that the
+ *    segment attachment index is used as a segment reference in the
+ *        virtual page
+ *    table format.
+ *
+ * @param mmu_id
+ *    The target MMU id.
+ * @param segment_id
+ *    The target segment id.
+ * @param index
+ *    Index into the MMU's segment attachment table.
+ * @param perms
+ *    Mapping permissions.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_attach_segment(okl4_kcap_t mmu_id, okl4_kcap_t segment_id,
+        okl4_count_t index, okl4_page_perms_t perms)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+    register uint32_t r1 asm("r1") = (uint32_t)segment_id;
+    register uint32_t r2 asm("r2") = (uint32_t)index;
+    register uint32_t r3 asm("r3") = (uint32_t)perms;
+    __asm__ __volatile__(
+            ""hvc(5152)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3)
+            :
+            : "cc", "memory", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_attach_segment(okl4_kcap_t mmu_id, okl4_kcap_t segment_id,
+        okl4_count_t index, okl4_page_perms_t perms)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)segment_id;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)index;
+    register okl4_register_t x3 asm("x3") = (okl4_register_t)perms;
+    __asm__ __volatile__(
+            "" hvc(5152) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3)
+            :
+            : "cc", "memory", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Detach a segment from an MMU.
+ *
+ *    @details
+ *    A segment can be detached from an MMU or vMMU, causing its reference
+ *        count
+ *    to decrease. When the reference count reaches zero, the attachment is
+ *    removed and all mappings in the MMU object relating to the segment
+ *        are
+ *    removed.
+ *
+ *    The detach-segment operation is potentially a long running operation,
+ *    especially if invoked on a vMMU.
+ *
+ * @param mmu_id
+ *    The target MMU id.
+ * @param index
+ *    Index into the MMU's segment attachment table.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_detach_segment(okl4_kcap_t mmu_id, okl4_count_t index)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+    register uint32_t r1 asm("r1") = (uint32_t)index;
+    __asm__ __volatile__(
+            ""hvc(5153)"\n\t"
+            : "+r"(r0), "+r"(r1)
+            :
+            : "cc", "memory", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_detach_segment(okl4_kcap_t mmu_id, okl4_count_t index)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)index;
+    __asm__ __volatile__(
+            "" hvc(5153) "\n\t"
+            : "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Flush a range of virtual addresses from an MMU.
+ *
+ *    @details
+ *    This causes the kernel to remove all mappings covering the specified
+ *    virtual address range.
+ *
+ *    @note The size of the range must be a multiple of 1MB and the
+ *    starting virtual address must be 1MB aligned.
+ *    There is no support for flushing at a finer granularity.
+ *    If a fine grained flush is required, the caller should use the
+ *    @ref _okl4_sys_mmu_unmap_page operation.
+ *
+ *    The flush-range operation is potentially a long running operation.
+ *
+ * @param mmu_id
+ *    The target MMU id.
+ * @param vaddr
+ *    The starting virtual address of the range.
+ *    (Must be 1MB aligned)
+ * @param size
+ *    Size of the range. (Must be a multiple of 1MB)
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_flush_range(okl4_kcap_t mmu_id, okl4_laddr_tr_t vaddr,
+        okl4_lsize_tr_t size)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+    register uint32_t r1 asm("r1") = (uint32_t)vaddr;
+    register uint32_t r2 asm("r2") = (uint32_t)size;
+    __asm__ __volatile__(
+            ""hvc(5154)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_flush_range(okl4_kcap_t mmu_id, okl4_laddr_tr_t vaddr,
+        okl4_lsize_tr_t size)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)vaddr;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)size;
+    __asm__ __volatile__(
+            "" hvc(5154) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
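+/*
+ * Illustrative sketch (not part of the generated API): flushing a 4MB
+ * window of guest virtual address space.  The MMU capability and the base
+ * address are assumptions for the example; both the base and the size must
+ * be multiples of 1MB, as noted above.
+ *
+ *   okl4_laddr_tr_t base = 0x40000000UL;        // 1MB aligned
+ *   okl4_lsize_tr_t size = 4UL << 20;           // multiple of 1MB
+ *
+ *   if ((base & ((1UL << 20) - 1UL)) == 0UL &&
+ *           (size & ((1UL << 20) - 1UL)) == 0UL) {
+ *       (void)_okl4_sys_mmu_flush_range(guest_mmu_cap, base, size);
+ *   }
+ */
+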
+/**
+ *
+ * @brief Flush a range of virtual addresses, specified as page numbers, from an MMU.
+ *
+ *    @details
+ *    This causes the kernel to remove all mappings covering the specified
+ *    virtual address range.
+ *
+ *    @note The size of the range must be a multiple of 1MB and the
+ *    starting virtual address must be 1MB aligned.
+ *    There is no support for flushing at a finer granularity.
+ *    If a fine grained flush is required, the caller should use the
+ *    @ref _okl4_sys_mmu_unmap_page operation.
+ *
+ *    The flush-range operation is potentially a long running operation.
+ *
+ * @param mmu_id
+ *    The target MMU id.
+ * @param laddr_pn
+ *    Logical address page-number of the mapping.
+ * @param count_pn
+ *    The number of consecutive pages to map/unmap.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_flush_range_pn(okl4_kcap_t mmu_id, okl4_laddr_pn_t laddr_pn,
+        okl4_lsize_pn_t count_pn)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+    register uint32_t r1 asm("r1") = (uint32_t)laddr_pn;
+    register uint32_t r2 asm("r2") = (uint32_t)count_pn;
+    __asm__ __volatile__(
+            ""hvc(5155)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_flush_range_pn(okl4_kcap_t mmu_id, okl4_laddr_pn_t laddr_pn,
+        okl4_lsize_pn_t count_pn)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)laddr_pn;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)count_pn;
+    __asm__ __volatile__(
+            "" hvc(5155) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Lookup a virtual address in the MMU.
+ *
+ *    @details
+ *    This operation performs a lookup in the MMU's pagetable for a mapping
+ *    derived from a specified segment.
+ *
+ *    If a mapping is found that is derived from the specified segment, the
+ *    operation will return the segment offset, size and the page attributes
+ *    associated with the mapping.
+ *
+ *    If a segment_index value of OKL4_KCAP_INVALID is specified, the
+ *    operation will search for a matching segment in the MMU.
+ *
+ * @param mmu_id
+ *    The target MMU id.
+ * @param vaddr
+ *    Virtual address of the mapping.
+ * @param segment_index
+ *    Index into the MMU's segment attachment table.
+ *
+ * @retval error
+ *    Resulting error.
+ * @retval offset
+ *    Offset into the segment.
+ * @retval size
+ *    Size of the mapping, in bytes. Size will be one of the supported
+ *    machine page-sizes. If a segment search was performed, the lower
+ *    10 bits of size contain the returned segment-index.
+ * @retval page_attr
+ *    Mapping attributes.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_mmu_lookup_page_return
+_okl4_sys_mmu_lookup_page(okl4_kcap_t mmu_id, okl4_laddr_tr_t vaddr,
+        okl4_count_t segment_index)
+{
+    typedef union {
+        struct uint64 {
+            uint32_t lo;
+            uint32_t hi;
+        } words;
+        uint64_t val;
+    } okl4_uint64_tmp;
+    okl4_uint64_tmp size_tmp;
+    struct _okl4_sys_mmu_lookup_page_return result;
+
+    register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+    register uint32_t r1 asm("r1") = (uint32_t)vaddr;
+    register uint32_t r2 asm("r2") = (uint32_t)segment_index;
+    register uint32_t r3 asm("r3");
+    register uint32_t r4 asm("r4");
+    __asm__ __volatile__(
+            ""hvc(5156)"\n\t"
+            : "=r"(r3), "=r"(r4), "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r5"
+            );
+
+
+    result.error = (okl4_error_t)(r0);
+    result.offset = (okl4_psize_tr_t)(r1);
+    size_tmp.words.lo = r2;
+    size_tmp.words.hi = r3;
+    result.size = (okl4_mmu_lookup_size_t)(size_tmp.val);
+    result.page_attr = (_okl4_page_attribute_t)(r4);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_mmu_lookup_page_return
+_okl4_sys_mmu_lookup_page(okl4_kcap_t mmu_id, okl4_laddr_tr_t vaddr,
+        okl4_count_t segment_index)
+{
+    struct _okl4_sys_mmu_lookup_page_return result;
+
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)vaddr;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)segment_index;
+    register okl4_register_t x3 asm("x3");
+    __asm__ __volatile__(
+            "" hvc(5156) "\n\t"
+            : "=r"(x3), "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.error = (okl4_error_t)(x0);
+    result.offset = (okl4_psize_tr_t)(x1);
+    result.size = (okl4_mmu_lookup_size_t)(x2);
+    result.page_attr = (_okl4_page_attribute_t)(x3);
+    return result;
+}
+
+#endif
+
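+/*
+ * Illustrative sketch only: looking up the mapping that covers a virtual
+ * address. Passing OKL4_KCAP_INVALID as segment_index (as described above)
+ * asks the kernel to search for the matching segment; `mmu_cap` and
+ * OKL4_ERROR_OK are assumed names.
+ *
+ *     struct _okl4_sys_mmu_lookup_page_return lk =
+ *             _okl4_sys_mmu_lookup_page(mmu_cap, 0x40000000UL,
+ *                     OKL4_KCAP_INVALID);
+ *     if (lk.error == OKL4_ERROR_OK) {
+ *         // lk.offset, lk.size and lk.page_attr describe the mapping; after
+ *         // a segment search, the low 10 bits of lk.size hold the returned
+ *         // segment index.
+ *     }
+ */
+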
+/**
+ *
+ * @brief Lookup a virtual address in the MMU.
+ *
+ *    @details
+ *    This operation performs a lookup in the MMU's pagetable for a mapping
+ *    derived from a specified segment.
+ *
+ *    If a mapping is found that is derived from the specified segment, the
+ *    operation will return the segment offset, size and the page attributes
+ *    associated with the mapping.
+ *
+ *    If a segment_index value of OKL4_KCAP_INVALID is specified, the
+ *    operation will search for a matching segment in the MMU.
+ *
+ * @param mmu_id
+ *    The target MMU id.
+ * @param laddr_pn
+ *    Logical address page-number of the mapping.
+ * @param segment_index
+ *    Index into the MMU's segment attachment table.
+ *
+ * @retval segment_index
+ *    Index into the MMU's segment attachment table, or error.
+ * @retval offset_pn
+ *    Offset into the segment in units of page numbers.
+ * @retval count_pn
+ *    The number of consecutive pages to map/unmap.
+ * @retval page_attr
+ *    Mapping attributes.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_mmu_lookup_pn_return
+_okl4_sys_mmu_lookup_pn(okl4_kcap_t mmu_id, okl4_laddr_pn_t laddr_pn,
+        okl4_count_t segment_index)
+{
+    struct _okl4_sys_mmu_lookup_pn_return result;
+
+    register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+    register uint32_t r1 asm("r1") = (uint32_t)laddr_pn;
+    register uint32_t r2 asm("r2") = (uint32_t)segment_index;
+    register uint32_t r3 asm("r3");
+    __asm__ __volatile__(
+            ""hvc(5157)"\n\t"
+            : "=r"(r3), "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r4", "r5"
+            );
+
+
+    result.segment_index = (okl4_mmu_lookup_index_t)(r0);
+    result.offset_pn = (okl4_psize_pn_t)(r1);
+    result.count_pn = (okl4_lsize_pn_t)(r2);
+    result.page_attr = (_okl4_page_attribute_t)(r3);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_mmu_lookup_pn_return
+_okl4_sys_mmu_lookup_pn(okl4_kcap_t mmu_id, okl4_laddr_pn_t laddr_pn,
+        okl4_count_t segment_index)
+{
+    struct _okl4_sys_mmu_lookup_pn_return result;
+
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)laddr_pn;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)segment_index;
+    register okl4_register_t x3 asm("x3");
+    __asm__ __volatile__(
+            "" hvc(5157) "\n\t"
+            : "=r"(x3), "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.segment_index = (okl4_mmu_lookup_index_t)(x0);
+    result.offset_pn = (okl4_psize_pn_t)(x1);
+    result.count_pn = (okl4_lsize_pn_t)(x2);
+    result.page_attr = (_okl4_page_attribute_t)(x3);
+    return result;
+}
+
+#endif
+
+/**
+ *
+ * @brief Create a mapping at a virtual address in the MMU.
+ *
+ *    @details
+ *    This operation installs a new mapping into the MMU at the specified
+ *    virtual address. The mapping's physical address is determined from the
+ *    specified segment and offset, and the mapping's size and attributes are
+ *    provided in \p size and \p page_attr.
+ *
+ * @param mmu_id
+ *    The target MMU id.
+ * @param vaddr
+ *    Virtual address of the mapping.
+ * @param segment_index
+ *    Index into the MMU's segment attachment table.
+ * @param offset
+ *    Offset into the segment.
+ * @param size
+ *    Size of the mapping, in bytes.
+ * @param page_attr
+ *    Mapping attributes.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_map_page(okl4_kcap_t mmu_id, okl4_laddr_tr_t vaddr,
+        okl4_count_t segment_index, okl4_psize_tr_t offset, okl4_lsize_tr_t size
+        , _okl4_page_attribute_t page_attr)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+    register uint32_t r1 asm("r1") = (uint32_t)vaddr;
+    register uint32_t r2 asm("r2") = (uint32_t)segment_index;
+    register uint32_t r3 asm("r3") = (uint32_t)offset;
+    register uint32_t r4 asm("r4") = (uint32_t)size;
+    register uint32_t r5 asm("r5") = (uint32_t)page_attr;
+    __asm__ __volatile__(
+            ""hvc(5158)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4), "+r"(r5)
+            :
+            : "cc", "memory"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_map_page(okl4_kcap_t mmu_id, okl4_laddr_tr_t vaddr,
+        okl4_count_t segment_index, okl4_psize_tr_t offset, okl4_lsize_tr_t size
+        , _okl4_page_attribute_t page_attr)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)vaddr;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)segment_index;
+    register okl4_register_t x3 asm("x3") = (okl4_register_t)offset;
+    register okl4_register_t x4 asm("x4") = (okl4_register_t)size;
+    register okl4_register_t x5 asm("x5") = (okl4_register_t)page_attr;
+    __asm__ __volatile__(
+            "" hvc(5158) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4), "+r"(x5)
+            :
+            : "cc", "memory", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
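+/*
+ * Illustrative sketch only: installing a single 4KB mapping at a fixed
+ * virtual address. The segment index, segment offset and the page_attr value
+ * are placeholders; how _okl4_page_attribute_t encodes cache attributes and
+ * permissions is defined elsewhere in this header set.
+ *
+ *     _okl4_page_attribute_t attr = 0;  // placeholder: encoded cache + perms
+ *
+ *     okl4_error_t err = _okl4_sys_mmu_map_page(mmu_cap,
+ *             0x40000000UL,   // virtual address of the new mapping
+ *             0,              // segment attachment index
+ *             0x1000,         // offset into the segment, in bytes
+ *             0x1000,         // mapping size in bytes
+ *             attr);
+ */
+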
+/**
+ *
+ * @brief Create a mapping at a virtual address in the MMU.
+ *
+ *    @details
+ *    This operation installs a new mapping into the MMU at the specified
+ *    virtual address. The mapping's physical address is determined from the
+ *    specified segment and offset, and the mapping's size and attributes are
+ *    provided in \p size and \p page_attr.
+ *
+ * @param mmu_id
+ *    The target MMU id.
+ * @param laddr_pn
+ *    Logical address page-number of the mapping.
+ * @param segment_index
+ *    Index into the MMU's segment attachment table.
+ * @param segment_offset_pn
+ *    Offset into the segment in units of page numbers.
+ * @param count_pn
+ *    The number of consecutive pages to map/unmap.
+ * @param page_attr
+ *    Mapping attributes.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_map_pn(okl4_kcap_t mmu_id, okl4_laddr_pn_t laddr_pn,
+        okl4_count_t segment_index, okl4_psize_pn_t segment_offset_pn,
+        okl4_lsize_pn_t count_pn, _okl4_page_attribute_t page_attr)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+    register uint32_t r1 asm("r1") = (uint32_t)laddr_pn;
+    register uint32_t r2 asm("r2") = (uint32_t)segment_index;
+    register uint32_t r3 asm("r3") = (uint32_t)segment_offset_pn;
+    register uint32_t r4 asm("r4") = (uint32_t)count_pn;
+    register uint32_t r5 asm("r5") = (uint32_t)page_attr;
+    __asm__ __volatile__(
+            ""hvc(5159)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4), "+r"(r5)
+            :
+            : "cc", "memory"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_map_pn(okl4_kcap_t mmu_id, okl4_laddr_pn_t laddr_pn,
+        okl4_count_t segment_index, okl4_psize_pn_t segment_offset_pn,
+        okl4_lsize_pn_t count_pn, _okl4_page_attribute_t page_attr)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)laddr_pn;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)segment_index;
+    register okl4_register_t x3 asm("x3") = (okl4_register_t)segment_offset_pn;
+    register okl4_register_t x4 asm("x4") = (okl4_register_t)count_pn;
+    register okl4_register_t x5 asm("x5") = (okl4_register_t)page_attr;
+    __asm__ __volatile__(
+            "" hvc(5159) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4), "+r"(x5)
+            :
+            : "cc", "memory", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
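+/*
+ * Illustrative sketch only: the page-number variant maps a run of consecutive
+ * pages in one call. All values below (page numbers, segment index, offsets,
+ * page count, attributes) are placeholders, and a 4KB page size is assumed.
+ *
+ *     okl4_laddr_pn_t laddr_pn = 0x40000000UL >> 12;
+ *     okl4_psize_pn_t segment_offset_pn = 0;
+ *     _okl4_page_attribute_t attr = 0;   // placeholder attributes
+ *
+ *     okl4_error_t err = _okl4_sys_mmu_map_pn(mmu_cap, laddr_pn,
+ *             0,                  // segment attachment index
+ *             segment_offset_pn,  // offset into the segment, in pages
+ *             16,                 // number of consecutive pages
+ *             attr);
+ */
+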
+/**
+ *
+ * @brief Remove a mapping at a virtual address in the MMU.
+ *
+ *    @details
+ *    This operation removes a mapping from the MMU at the specified virtual
+ *    address. The size and address specified must match the size and base
+ *    address of the mapping being removed.
+ *
+ * @param mmu_id
+ *    The target MMU id.
+ * @param vaddr
+ *    Virtual address of the mapping.
+ * @param size
+ *    Size of the mapping, in bytes.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_unmap_page(okl4_kcap_t mmu_id, okl4_laddr_tr_t vaddr,
+        okl4_lsize_tr_t size)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+    register uint32_t r1 asm("r1") = (uint32_t)vaddr;
+    register uint32_t r2 asm("r2") = (uint32_t)size;
+    __asm__ __volatile__(
+            ""hvc(5160)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_unmap_page(okl4_kcap_t mmu_id, okl4_laddr_tr_t vaddr,
+        okl4_lsize_tr_t size)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)vaddr;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)size;
+    __asm__ __volatile__(
+            "" hvc(5160) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
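+/*
+ * Illustrative sketch only: removing a 4KB mapping. As the documentation
+ * notes, the address and size must exactly match the existing mapping;
+ * `mmu_cap` and OKL4_ERROR_OK are assumed names.
+ *
+ *     okl4_error_t err = _okl4_sys_mmu_unmap_page(mmu_cap, 0x40000000UL,
+ *             0x1000);
+ *     if (err != OKL4_ERROR_OK) {
+ *         // no mapping with this exact base and size exists
+ *     }
+ */
+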
+/**
+ *
+ * @brief Remove a mapping at a virtual address in the MMU.
+ *
+ *    @details
+ *    This operation removes a mapping from the MMU at the specified virtual
+ *    address. The size and address specified must match the size and base
+ *    address of the mapping being removed.
+ *
+ * @param mmu_id
+ *    The target MMU id.
+ * @param laddr_pn
+ *    Logical address page-number of the mapping.
+ * @param count_pn
+ *    The number of consecutive pages to map/unmap.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_unmap_pn(okl4_kcap_t mmu_id, okl4_laddr_pn_t laddr_pn,
+        okl4_lsize_pn_t count_pn)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+    register uint32_t r1 asm("r1") = (uint32_t)laddr_pn;
+    register uint32_t r2 asm("r2") = (uint32_t)count_pn;
+    __asm__ __volatile__(
+            ""hvc(5161)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_unmap_pn(okl4_kcap_t mmu_id, okl4_laddr_pn_t laddr_pn,
+        okl4_lsize_pn_t count_pn)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)laddr_pn;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)count_pn;
+    __asm__ __volatile__(
+            "" hvc(5161) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Update the cache attributes of a mapping in the MMU.
+ *
+ * @param mmu_id
+ *    The target MMU id.
+ * @param vaddr
+ *    Virtual address of the mapping.
+ * @param segment_index
+ *    Index into the MMU's segment attachment table.
+ * @param size
+ *    Size of the mapping, in bytes.
+ * @param attrs
+ *    Mapping cache attributes.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_update_page_attrs(okl4_kcap_t mmu_id, okl4_laddr_tr_t vaddr,
+        okl4_count_t segment_index, okl4_lsize_tr_t size,
+        okl4_page_cache_t attrs)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+    register uint32_t r1 asm("r1") = (uint32_t)vaddr;
+    register uint32_t r2 asm("r2") = (uint32_t)segment_index;
+    register uint32_t r3 asm("r3") = (uint32_t)size;
+    register uint32_t r4 asm("r4") = (uint32_t)attrs;
+    __asm__ __volatile__(
+            ""hvc(5162)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4)
+            :
+            : "cc", "memory", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_update_page_attrs(okl4_kcap_t mmu_id, okl4_laddr_tr_t vaddr,
+        okl4_count_t segment_index, okl4_lsize_tr_t size,
+        okl4_page_cache_t attrs)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)vaddr;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)segment_index;
+    register okl4_register_t x3 asm("x3") = (okl4_register_t)size;
+    register okl4_register_t x4 asm("x4") = (okl4_register_t)attrs;
+    __asm__ __volatile__(
+            "" hvc(5162) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4)
+            :
+            : "cc", "memory", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Update the page permissions of a mapping in the MMU.
+ *
+ * @param mmu_id
+ *    The target MMU id.
+ * @param vaddr
+ *    Virtual address of the mapping.
+ * @param segment_index
+ *    Index into the MMU's segment attachment table.
+ * @param size
+ *    Size of the mapping, in bytes.
+ * @param perms
+ *    Mapping permissions.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_update_page_perms(okl4_kcap_t mmu_id, okl4_laddr_tr_t vaddr,
+        okl4_count_t segment_index, okl4_lsize_tr_t size,
+        okl4_page_perms_t perms)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+    register uint32_t r1 asm("r1") = (uint32_t)vaddr;
+    register uint32_t r2 asm("r2") = (uint32_t)segment_index;
+    register uint32_t r3 asm("r3") = (uint32_t)size;
+    register uint32_t r4 asm("r4") = (uint32_t)perms;
+    __asm__ __volatile__(
+            ""hvc(5163)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4)
+            :
+            : "cc", "memory", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_update_page_perms(okl4_kcap_t mmu_id, okl4_laddr_tr_t vaddr,
+        okl4_count_t segment_index, okl4_lsize_tr_t size,
+        okl4_page_perms_t perms)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)vaddr;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)segment_index;
+    register okl4_register_t x3 asm("x3") = (okl4_register_t)size;
+    register okl4_register_t x4 asm("x4") = (okl4_register_t)perms;
+    __asm__ __volatile__(
+            "" hvc(5163) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4)
+            :
+            : "cc", "memory", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
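+/*
+ * Illustrative sketch only: the two update calls retune an existing mapping
+ * without remapping it. `cache_attr` and `ro_perms` are placeholder values of
+ * okl4_page_cache_t and okl4_page_perms_t; the real constants are defined
+ * elsewhere in this header set.
+ *
+ *     // Change the cache attributes of the 4KB mapping at 0x40000000.
+ *     okl4_page_cache_t cache_attr = 0;   // placeholder cache attribute
+ *     _okl4_sys_mmu_update_page_attrs(mmu_cap, 0x40000000UL, 0, 0x1000,
+ *             cache_attr);
+ *
+ *     // Downgrade the same mapping's permissions, e.g. to read-only.
+ *     okl4_page_perms_t ro_perms = 0;     // placeholder permission value
+ *     _okl4_sys_mmu_update_page_perms(mmu_cap, 0x40000000UL, 0, 0x1000,
+ *             ro_perms);
+ */
+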
+/**
+ *
+ * @brief Update the cache attributes of a mapping in the MMU.
+ *
+ * @param mmu_id
+ *    The target MMU id.
+ * @param laddr_pn
+ *    Logical address page-number of the mapping.
+ * @param segment_index
+ *    Index into the MMU's segment attachment table.
+ * @param count_pn
+ *    The number of consecutive pages to map/unmap.
+ * @param attrs
+ *    Mapping cache attributes.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_update_pn_attrs(okl4_kcap_t mmu_id, okl4_laddr_pn_t laddr_pn,
+        okl4_count_t segment_index, okl4_lsize_pn_t count_pn,
+        okl4_page_cache_t attrs)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+    register uint32_t r1 asm("r1") = (uint32_t)laddr_pn;
+    register uint32_t r2 asm("r2") = (uint32_t)segment_index;
+    register uint32_t r3 asm("r3") = (uint32_t)count_pn;
+    register uint32_t r4 asm("r4") = (uint32_t)attrs;
+    __asm__ __volatile__(
+            ""hvc(5164)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4)
+            :
+            : "cc", "memory", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_update_pn_attrs(okl4_kcap_t mmu_id, okl4_laddr_pn_t laddr_pn,
+        okl4_count_t segment_index, okl4_lsize_pn_t count_pn,
+        okl4_page_cache_t attrs)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)laddr_pn;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)segment_index;
+    register okl4_register_t x3 asm("x3") = (okl4_register_t)count_pn;
+    register okl4_register_t x4 asm("x4") = (okl4_register_t)attrs;
+    __asm__ __volatile__(
+            "" hvc(5164) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4)
+            :
+            : "cc", "memory", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Update the page permissions of a mapping in the MMU.
+ *
+ * @param mmu_id
+ *    The target MMU id.
+ * @param laddr_pn
+ *    Logical address page-number of the mapping.
+ * @param segment_index
+ *    Index into the MMU's segment attachment table.
+ * @param count_pn
+ *    The number of consecutive pages to map/unmap.
+ * @param perms
+ *    Mapping permissions.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_update_pn_perms(okl4_kcap_t mmu_id, okl4_laddr_pn_t laddr_pn,
+        okl4_count_t segment_index, okl4_lsize_pn_t count_pn,
+        okl4_page_perms_t perms)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+    register uint32_t r1 asm("r1") = (uint32_t)laddr_pn;
+    register uint32_t r2 asm("r2") = (uint32_t)segment_index;
+    register uint32_t r3 asm("r3") = (uint32_t)count_pn;
+    register uint32_t r4 asm("r4") = (uint32_t)perms;
+    __asm__ __volatile__(
+            ""hvc(5165)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4)
+            :
+            : "cc", "memory", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_update_pn_perms(okl4_kcap_t mmu_id, okl4_laddr_pn_t laddr_pn,
+        okl4_count_t segment_index, okl4_lsize_pn_t count_pn,
+        okl4_page_perms_t perms)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)laddr_pn;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)segment_index;
+    register okl4_register_t x3 asm("x3") = (okl4_register_t)count_pn;
+    register okl4_register_t x4 asm("x4") = (okl4_register_t)perms;
+    __asm__ __volatile__(
+            "" hvc(5165) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4)
+            :
+            : "cc", "memory", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * A NULL system-call for latency measurement.
+ *
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_performance_null_syscall(void)
+{
+    register uint32_t r0 asm("r0");
+    __asm__ __volatile__(
+            ""hvc(5198)"\n\t"
+            : "=r"(r0)
+            :
+            : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_performance_null_syscall(void)
+{
+    register okl4_register_t x0 asm("x0");
+    __asm__ __volatile__(
+            "" hvc(5198) "\n\t"
+            : "=r"(x0)
+            :
+            : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * Control a pipe, including reset, ready and halt functionality.
+ *
+ * @param pipe_id
+ *    The capability identifier of the pipe.
+ * @param control
+ *    The state control argument.
+ *
+ * @retval error
+ *    The returned error code.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_pipe_control(okl4_kcap_t pipe_id, okl4_pipe_control_t control)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)pipe_id;
+    register uint32_t r1 asm("r1") = (uint32_t)control;
+    __asm__ __volatile__(
+            ""hvc(5146)"\n\t"
+            : "+r"(r0), "+r"(r1)
+            :
+            : "cc", "memory", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_pipe_control(okl4_kcap_t pipe_id, okl4_pipe_control_t control)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)pipe_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)control;
+    __asm__ __volatile__(
+            "" hvc(5146) "\n\t"
+            : "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * Receive a message from a microvisor pipe.
+ *
+ * @param pipe_id
+ *    The capability identifier of the pipe.
+ * @param buf_size
+ *    Size of the receive buffer.
+ * @param data
+ *    Pointer to receive buffer.
+ *
+ * @retval error
+ *    The returned error code.
+ * @retval size
+ *    Size of the received message.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_pipe_recv_return
+_okl4_sys_pipe_recv(okl4_kcap_t pipe_id, okl4_vsize_t buf_size, uint8_t *data)
+{
+    typedef union {
+        struct uint64 {
+            uint32_t lo;
+            uint32_t hi;
+        } words;
+        uint64_t val;
+    } okl4_uint64_tmp;
+    okl4_uint64_tmp size_tmp;
+    struct _okl4_sys_pipe_recv_return result;
+
+    register uint32_t r0 asm("r0") = (uint32_t)pipe_id;
+    register uint32_t r1 asm("r1") = (uint32_t)buf_size;
+    register uint32_t r2 asm("r2") = (uint32_t)(uintptr_t)data;
+    __asm__ __volatile__(
+            ""hvc(5147)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    result.error = (okl4_error_t)(r0);
+    size_tmp.words.lo = r1;
+    size_tmp.words.hi = r2;
+    result.size = (okl4_ksize_t)(size_tmp.val);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_pipe_recv_return
+_okl4_sys_pipe_recv(okl4_kcap_t pipe_id, okl4_vsize_t buf_size, uint8_t *data)
+{
+    struct _okl4_sys_pipe_recv_return result;
+
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)pipe_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)buf_size;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)(uintptr_t)data;
+    __asm__ __volatile__(
+            "" hvc(5147) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.error = (okl4_error_t)(x0);
+    result.size = (okl4_ksize_t)(x1);
+    return result;
+}
+
+#endif
+
+/**
+ *
+ * Send a message to a microvisor pipe.
+ *
+ * @param pipe_id
+ *    The capability identifier of the pipe.
+ * @param size
+ *    Size of the message to send.
+ * @param data
+ *    Pointer to the message payload to send.
+ *
+ * @retval error
+ *    The returned error code.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_pipe_send(okl4_kcap_t pipe_id, okl4_vsize_t size, const uint8_t *data)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)pipe_id;
+    register uint32_t r1 asm("r1") = (uint32_t)size;
+    register uint32_t r2 asm("r2") = (uint32_t)(uintptr_t)data;
+    __asm__ __volatile__(
+            ""hvc(5148)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_pipe_send(okl4_kcap_t pipe_id, okl4_vsize_t size, const uint8_t *data)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)pipe_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)size;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)(uintptr_t)data;
+    __asm__ __volatile__(
+            "" hvc(5148) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
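+/*
+ * Illustrative sketch only: a simple message exchange over microvisor pipes.
+ * `tx_pipe_cap` and `rx_pipe_cap` are assumed pipe capabilities from the
+ * environment, and OKL4_ERROR_OK is the assumed success value.
+ *
+ *     static const uint8_t msg[] = "hello";
+ *     uint8_t buf[64];
+ *
+ *     okl4_error_t err = _okl4_sys_pipe_send(tx_pipe_cap, sizeof(msg), msg);
+ *
+ *     struct _okl4_sys_pipe_recv_return rx =
+ *             _okl4_sys_pipe_recv(rx_pipe_cap, sizeof(buf), buf);
+ *     if (rx.error == OKL4_ERROR_OK) {
+ *         // rx.size bytes of payload are now in buf
+ *     }
+ */
+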
+/**
+ *
+ * @brief Waive the current vCPU's priority.
+ *
+ *    @details
+ *    This operation allows a vCPU to change its waived priority. A vCPU has
+ *    both a base priority and its current priority.
+ *
+ *    The base priority is the statically assigned maximum priority that a
+ *    vCPU has been given. The current priority is the priority used for
+ *    system scheduling and is limited to the range of zero to the base
+ *    priority.
+ *
+ *    The `waive-priority` operation allows a vCPU to set its current priority
+ *    and is normally used to reduce its current priority. This allows a vCPU
+ *    to perform work at a lower system priority, and supports the interleaved
+ *    scheduling feature.
+ *
+ *    A vCPU's priority is restored to its base priority whenever an interrupt
+ *    that has the vCPU registered as its handler is raised. This allows
+ *    interrupt handling and guest operating systems to return to the base
+ *    priority to potentially do higher priority work.
+ *
+ *    After calling this interface an immediate reschedule will be performed.
+ *
+ * @param priority
+ *    New vCPU priority.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_priority_waive(okl4_priority_t priority)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)priority;
+    __asm__ __volatile__(
+            ""hvc(5151)"\n\t"
+            : "+r"(r0)
+            :
+            : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_priority_waive(okl4_priority_t priority)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)priority;
+    __asm__ __volatile__(
+            "" hvc(5151) "\n\t"
+            : "+r"(x0)
+            :
+            : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
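+/*
+ * Illustrative sketch only: a vCPU temporarily dropping its current priority
+ * to do background work, as described above. Priority 0 is the lowest value
+ * in the documented zero-to-base range; OKL4_ERROR_OK is the assumed success
+ * value. The base priority is restored automatically when one of the vCPU's
+ * registered interrupts is raised.
+ *
+ *     okl4_error_t err = _okl4_sys_priority_waive(0);
+ *     if (err == OKL4_ERROR_OK) {
+ *         // run low-priority housekeeping here
+ *     }
+ */
+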
+/**
+ *
+ * OKL4 Microvisor system call: REMOTE_GET_REGISTER
+ *
+ * @param target
+ * @param reg_and_set
+ *
+ * @retval reg_w0
+ * @retval reg_w1
+ * @retval reg_w2
+ * @retval reg_w3
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_remote_get_register_return
+_okl4_sys_remote_get_register(okl4_kcap_t target,
+        okl4_register_and_set_t reg_and_set)
+{
+    struct _okl4_sys_remote_get_register_return result;
+
+    register uint32_t r0 asm("r0") = (uint32_t)target;
+    register uint32_t r1 asm("r1") = (uint32_t)reg_and_set;
+    register uint32_t r2 asm("r2");
+    register uint32_t r3 asm("r3");
+    register uint32_t r4 asm("r4");
+    __asm__ __volatile__(
+            ""hvc(5200)"\n\t"
+            : "=r"(r2), "=r"(r3), "=r"(r4), "+r"(r0), "+r"(r1)
+            :
+            : "cc", "memory", "r5"
+            );
+
+
+    result.reg_w0 = (uint32_t)(r0);
+    result.reg_w1 = (uint32_t)(r1);
+    result.reg_w2 = (uint32_t)(r2);
+    result.reg_w3 = (uint32_t)(r3);
+    result.error = (okl4_error_t)(r4);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_remote_get_register_return
+_okl4_sys_remote_get_register(okl4_kcap_t target,
+        okl4_register_and_set_t reg_and_set)
+{
+    struct _okl4_sys_remote_get_register_return result;
+
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)target;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)reg_and_set;
+    register okl4_register_t x2 asm("x2");
+    register okl4_register_t x3 asm("x3");
+    register okl4_register_t x4 asm("x4");
+    __asm__ __volatile__(
+            "" hvc(5200) "\n\t"
+            : "=r"(x2), "=r"(x3), "=r"(x4), "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x5", "x6", "x7"
+            );
+
+
+    result.reg_w0 = (uint32_t)(x0);
+    result.reg_w1 = (uint32_t)(x1);
+    result.reg_w2 = (uint32_t)(x2);
+    result.reg_w3 = (uint32_t)(x3);
+    result.error = (okl4_error_t)(x4);
+    return result;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: REMOTE_GET_REGISTERS
+ *
+ * @param target
+ * @param set
+ * @param regs
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_remote_get_registers(okl4_kcap_t target, okl4_register_set_t set,
+        void *regs)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)target;
+    register uint32_t r1 asm("r1") = (uint32_t)set;
+    register uint32_t r2 asm("r2") = (uint32_t)(uintptr_t)regs;
+    __asm__ __volatile__(
+            ""hvc(5201)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_remote_get_registers(okl4_kcap_t target, okl4_register_set_t set,
+        void *regs)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)target;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)set;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)(uintptr_t)regs;
+    __asm__ __volatile__(
+            "" hvc(5201) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: REMOTE_READ_MEMORY32
+ *
+ * @param target
+ * @param address
+ *
+ * @retval data
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_remote_read_memory32_return
+_okl4_sys_remote_read_memory32(okl4_kcap_t target, okl4_laddr_t address)
+{
+    struct _okl4_sys_remote_read_memory32_return result;
+
+    register uint32_t r0 asm("r0") = (uint32_t)target;
+    register uint32_t r1 asm("r1") = (uint32_t)(address        & 0xffffffff);
+    register uint32_t r2 asm("r2") = (uint32_t)((address >> 32) & 0xffffffff);
+    __asm__ __volatile__(
+            ""hvc(5202)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    result.data = (uint32_t)(r0);
+    result.error = (okl4_error_t)(r1);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_remote_read_memory32_return
+_okl4_sys_remote_read_memory32(okl4_kcap_t target, okl4_laddr_t address)
+{
+    struct _okl4_sys_remote_read_memory32_return result;
+
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)target;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)address;
+    __asm__ __volatile__(
+            "" hvc(5202) "\n\t"
+            : "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.data = (uint32_t)(x0);
+    result.error = (okl4_error_t)(x1);
+    return result;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: REMOTE_SET_REGISTER
+ *
+ * @param target
+ * @param reg_and_set
+ * @param reg_w0
+ * @param reg_w1
+ * @param reg_w2
+ * @param reg_w3
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_remote_set_register(okl4_kcap_t target,
+        okl4_register_and_set_t reg_and_set, uint32_t reg_w0, uint32_t reg_w1,
+        uint32_t reg_w2, uint32_t reg_w3)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)target;
+    register uint32_t r1 asm("r1") = (uint32_t)reg_and_set;
+    register uint32_t r2 asm("r2") = (uint32_t)reg_w0;
+    register uint32_t r3 asm("r3") = (uint32_t)reg_w1;
+    register uint32_t r4 asm("r4") = (uint32_t)reg_w2;
+    register uint32_t r5 asm("r5") = (uint32_t)reg_w3;
+    __asm__ __volatile__(
+            ""hvc(5203)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4), "+r"(r5)
+            :
+            : "cc", "memory"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_remote_set_register(okl4_kcap_t target,
+        okl4_register_and_set_t reg_and_set, uint32_t reg_w0, uint32_t reg_w1,
+        uint32_t reg_w2, uint32_t reg_w3)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)target;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)reg_and_set;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)reg_w0;
+    register okl4_register_t x3 asm("x3") = (okl4_register_t)reg_w1;
+    register okl4_register_t x4 asm("x4") = (okl4_register_t)reg_w2;
+    register okl4_register_t x5 asm("x5") = (okl4_register_t)reg_w3;
+    __asm__ __volatile__(
+            "" hvc(5203) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4), "+r"(x5)
+            :
+            : "cc", "memory", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: REMOTE_SET_REGISTERS
+ *
+ * @param target
+ * @param set
+ * @param regs
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_remote_set_registers(okl4_kcap_t target, okl4_register_set_t set,
+        void *regs)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)target;
+    register uint32_t r1 asm("r1") = (uint32_t)set;
+    register uint32_t r2 asm("r2") = (uint32_t)(uintptr_t)regs;
+    __asm__ __volatile__(
+            ""hvc(5204)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_remote_set_registers(okl4_kcap_t target, okl4_register_set_t set,
+        void *regs)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)target;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)set;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)(uintptr_t)regs;
+    __asm__ __volatile__(
+            "" hvc(5204) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: REMOTE_WRITE_MEMORY32
+ *
+ * @param target
+ * @param address
+ * @param data
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_remote_write_memory32(okl4_kcap_t target, okl4_laddr_t address,
+        uint32_t data)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)target;
+    register uint32_t r1 asm("r1") = (uint32_t)(address        & 0xffffffff);
+    register uint32_t r2 asm("r2") = (uint32_t)((address >> 32) & 0xffffffff);
+    register uint32_t r3 asm("r3") = (uint32_t)data;
+    __asm__ __volatile__(
+            ""hvc(5205)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3)
+            :
+            : "cc", "memory", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_remote_write_memory32(okl4_kcap_t target, okl4_laddr_t address,
+        uint32_t data)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)target;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)address;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)data;
+    __asm__ __volatile__(
+            "" hvc(5205) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
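+/*
+ * Illustrative sketch only: reading and then writing back a 32-bit word in a
+ * remote target's address space, typically from a debug or monitor cell.
+ * `remote_cap`, the address and OKL4_ERROR_OK are assumed names/values.
+ *
+ *     struct _okl4_sys_remote_read_memory32_return rd =
+ *             _okl4_sys_remote_read_memory32(remote_cap, 0x80001000ULL);
+ *     if (rd.error == OKL4_ERROR_OK) {
+ *         // write the value back with one bit set
+ *         _okl4_sys_remote_write_memory32(remote_cap, 0x80001000ULL,
+ *                 rd.data | 0x1U);
+ *     }
+ */
+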
+/**
+ *
+ * Retrieve suspend status.
+ *
+ * @param scheduler_id
+ *    The scheduler capability identifier.
+ *
+ * @retval error
+ *    Resulting error.
+ * @retval power_suspend_version
+ *    The power suspend versioning number.
+ * @retval power_suspend_running_count
+ *    The number of running power_suspend watched vCPUs.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_schedule_metrics_status_suspended_return
+_okl4_sys_schedule_metrics_status_suspended(okl4_kcap_t scheduler_id)
+{
+    struct _okl4_sys_schedule_metrics_status_suspended_return result;
+
+    register uint32_t r0 asm("r0") = (uint32_t)scheduler_id;
+    register uint32_t r1 asm("r1");
+    register uint32_t r2 asm("r2");
+    __asm__ __volatile__(
+            ""hvc(5206)"\n\t"
+            : "=r"(r1), "=r"(r2), "+r"(r0)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    result.error = (okl4_error_t)(r0);
+    result.power_suspend_version = (uint32_t)(r1);
+    result.power_suspend_running_count = (uint32_t)(r2);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_schedule_metrics_status_suspended_return
+_okl4_sys_schedule_metrics_status_suspended(okl4_kcap_t scheduler_id)
+{
+    struct _okl4_sys_schedule_metrics_status_suspended_return result;
+
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)scheduler_id;
+    register okl4_register_t x1 asm("x1");
+    register okl4_register_t x2 asm("x2");
+    __asm__ __volatile__(
+            "" hvc(5206) "\n\t"
+            : "=r"(x1), "=r"(x2), "+r"(x0)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.error = (okl4_error_t)(x0);
+    result.power_suspend_version = (uint32_t)(x1);
+    result.power_suspend_running_count = (uint32_t)(x2);
+    return result;
+}
+
+#endif
+
+/**
+ *
+ * Register a vCPU for suspend count tracking.
+ *
+ * @param scheduler_id
+ *    The scheduler capability identifier.
+ * @param vcpu_id
+ *    The target vCPU capability identifier.
+ * @param watch
+ *    Whether to register or unregister the vCPU.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_schedule_metrics_watch_suspended(okl4_kcap_t scheduler_id,
+        okl4_kcap_t vcpu_id, okl4_bool_t watch)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)scheduler_id;
+    register uint32_t r1 asm("r1") = (uint32_t)vcpu_id;
+    register uint32_t r2 asm("r2") = (uint32_t)watch;
+    __asm__ __volatile__(
+            ""hvc(5207)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_schedule_metrics_watch_suspended(okl4_kcap_t scheduler_id,
+        okl4_kcap_t vcpu_id, okl4_bool_t watch)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)scheduler_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)vcpu_id;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)watch;
+    __asm__ __volatile__(
+            "" hvc(5207) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
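+/*
+ * Illustrative sketch only: registering a vCPU for suspend tracking and then
+ * polling the aggregate status. `sched_cap` and `vcpu_cap` are assumed
+ * capabilities, and OKL4_ERROR_OK is the assumed success value.
+ *
+ *     okl4_error_t err = _okl4_sys_schedule_metrics_watch_suspended(sched_cap,
+ *             vcpu_cap, (okl4_bool_t)1);   // 1 = register, 0 = unregister
+ *
+ *     struct _okl4_sys_schedule_metrics_status_suspended_return st =
+ *             _okl4_sys_schedule_metrics_status_suspended(sched_cap);
+ *     if (st.error == OKL4_ERROR_OK) {
+ *         // st.power_suspend_running_count watched vCPUs are still running
+ *     }
+ */
+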
+/**
+ *
+ * @brief Disable profiling of a physical CPU.
+ *
+ * @param phys_cpu
+ *    The physical CPU capability id.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_schedule_profile_cpu_disable(okl4_kcap_t phys_cpu)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)phys_cpu;
+    __asm__ __volatile__(
+            ""hvc(5168)"\n\t"
+            : "+r"(r0)
+            :
+            : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_schedule_profile_cpu_disable(okl4_kcap_t phys_cpu)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)phys_cpu;
+    __asm__ __volatile__(
+            "" hvc(5168) "\n\t"
+            : "+r"(x0)
+            :
+            : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Enable profiling of a physical CPU.
+ *
+ *    This operation enables profiling of physical CPU related properties
+ *    such as core usage and context switch count.
+ *
+ * @param phys_cpu
+ *    The physical CPU capability id.
+ *
+ * @retval error
+ *    Resulting error.
+ * @retval timestamp
+ *    The current timestamp.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_schedule_profile_cpu_enable_return
+_okl4_sys_schedule_profile_cpu_enable(okl4_kcap_t phys_cpu)
+{
+    typedef union {
+        struct uint64 {
+            uint32_t lo;
+            uint32_t hi;
+        } words;
+        uint64_t val;
+    } okl4_uint64_tmp;
+    okl4_uint64_tmp timestamp_tmp;
+    struct _okl4_sys_schedule_profile_cpu_enable_return result;
+
+    register uint32_t r0 asm("r0") = (uint32_t)phys_cpu;
+    register uint32_t r1 asm("r1");
+    register uint32_t r2 asm("r2");
+    __asm__ __volatile__(
+            ""hvc(5169)"\n\t"
+            : "=r"(r1), "=r"(r2), "+r"(r0)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    result.error = (okl4_error_t)(r0);
+    timestamp_tmp.words.lo = r1;
+    timestamp_tmp.words.hi = r2;
+    result.timestamp = (uint64_t)(timestamp_tmp.val);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_schedule_profile_cpu_enable_return
+_okl4_sys_schedule_profile_cpu_enable(okl4_kcap_t phys_cpu)
+{
+    struct _okl4_sys_schedule_profile_cpu_enable_return result;
+
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)phys_cpu;
+    register okl4_register_t x1 asm("x1");
+    __asm__ __volatile__(
+            "" hvc(5169) "\n\t"
+            : "=r"(x1), "+r"(x0)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.error = (okl4_error_t)(x0);
+    result.timestamp = (uint64_t)(x1);
+    return result;
+}
+
+#endif
+
+/**
+ *
+ * @brief Retrieve profiling data relating to a physical CPU core.
+ *
+ *    @details
+ *    This operation returns a set of profiling data relating to a physical
+ *    CPU. A timestamp of the current system time in units of microseconds is
+ *    recorded during the operation. The remaining data fields indicate
+ *    runtime and number of events since the last invocation of this
+ *    operation.
+ *
+ *    After the profiling data is retrieved, the kernel resets all metrics to
+ *    zero.
+ *
+ *    @par profile data
+ *    For a physical CPU, the returned data is:
+ *    - \p cpu_time: Idle time of the CPU in microseconds.
+ *    - \p context_switches: Number of context switches on this core.
+ *    - \p enabled: True if profiling is enabled on this CPU.
+ *
+ * @param phys_cpu
+ *    The physical CPU capability id.
+ * @param profile
+ *    `return by reference`. Profiling data.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_schedule_profile_cpu_get_data(okl4_kcap_t phys_cpu,
+        struct okl4_schedule_profile_data *profile)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)phys_cpu;
+    register uint32_t r1 asm("r1") = (uint32_t)(uintptr_t)profile;
+    __asm__ __volatile__(
+            ""hvc(5170)"\n\t"
+            : "+r"(r0), "+r"(r1)
+            :
+            : "cc", "memory", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_schedule_profile_cpu_get_data(okl4_kcap_t phys_cpu,
+        struct okl4_schedule_profile_data *profile)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)phys_cpu;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)(uintptr_t)profile;
+    __asm__ __volatile__(
+            "" hvc(5170) "\n\t"
+            : "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Disable profiling of a vCPU.
+ *
+ * @param vcpu
+ *    The target vCPU id.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_schedule_profile_vcpu_disable(okl4_kcap_t vcpu)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)vcpu;
+    __asm__ __volatile__(
+            ""hvc(5171)"\n\t"
+            : "+r"(r0)
+            :
+            : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_schedule_profile_vcpu_disable(okl4_kcap_t vcpu)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)vcpu;
+    __asm__ __volatile__(
+            "" hvc(5171) "\n\t"
+            : "+r"(x0)
+            :
+            : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Enable profiling of a vCPU.
+ *
+ *    This operation enables profiling of vCPU related properties such as
+ *    execution time and context switch count.
+ *
+ * @param vcpu
+ *    The target vCPU id.
+ *
+ * @retval error
+ *    Resulting error.
+ * @retval timestamp
+ *    The current timestamp.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_schedule_profile_vcpu_enable_return
+_okl4_sys_schedule_profile_vcpu_enable(okl4_kcap_t vcpu)
+{
+    typedef union {
+        struct uint64 {
+            uint32_t lo;
+            uint32_t hi;
+        } words;
+        uint64_t val;
+    } okl4_uint64_tmp;
+    okl4_uint64_tmp timestamp_tmp;
+    struct _okl4_sys_schedule_profile_vcpu_enable_return result;
+
+    register uint32_t r0 asm("r0") = (uint32_t)vcpu;
+    register uint32_t r1 asm("r1");
+    register uint32_t r2 asm("r2");
+    __asm__ __volatile__(
+            ""hvc(5172)"\n\t"
+            : "=r"(r1), "=r"(r2), "+r"(r0)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    result.error = (okl4_error_t)(r0);
+    timestamp_tmp.words.lo = r1;
+    timestamp_tmp.words.hi = r2;
+    result.timestamp = (uint64_t)(timestamp_tmp.val);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_schedule_profile_vcpu_enable_return
+_okl4_sys_schedule_profile_vcpu_enable(okl4_kcap_t vcpu)
+{
+    struct _okl4_sys_schedule_profile_vcpu_enable_return result;
+
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)vcpu;
+    register okl4_register_t x1 asm("x1");
+    __asm__ __volatile__(
+            "" hvc(5172) "\n\t"
+            : "=r"(x1), "+r"(x0)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.error = (okl4_error_t)(x0);
+    result.timestamp = (uint64_t)(x1);
+    return result;
+}
+
+#endif
+
+/**
+ *
+ * @brief Retrieve profiling data relating to a vCPU.
+ *
+ *    @details
+ *    This operation returns a set of profiling data relating to a vCPU.
+ *    A timestamp of the current system time in units of microseconds is
+ *    recorded during the operation. The remaining data fields indicate
+ *    runtime and number of events since the last invocation of this
+ *    operation.
+ *
+ *    After the profiling data is retrieved, the kernel resets all metrics to
+ *    zero.
+ *
+ *    @par profile data
+ *    For a vCPU, the returned data is:
+ *    - \p cpu_time: Execution time of the vCPU in microseconds.
+ *    - \p context_switches: Number of context switches.
+ *    - \p cpu_migrations: Number of migrations between physical CPUs.
+ *    - \p enabled: True if profiling is enabled for this vCPU.
+ *
+ * @param vcpu
+ *    The target vCPU id.
+ * @param profile
+ *    `return by reference`. Profiling data.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_schedule_profile_vcpu_get_data(okl4_kcap_t vcpu,
+        struct okl4_schedule_profile_data *profile)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)vcpu;
+    register uint32_t r1 asm("r1") = (uint32_t)(uintptr_t)profile;
+    __asm__ __volatile__(
+            ""hvc(5173)"\n\t"
+            : "+r"(r0), "+r"(r1)
+            :
+            : "cc", "memory", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_schedule_profile_vcpu_get_data(okl4_kcap_t vcpu,
+        struct okl4_schedule_profile_data *profile)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)vcpu;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)(uintptr_t)profile;
+    __asm__ __volatile__(
+            "" hvc(5173) "\n\t"
+            : "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
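+/*
+ * Illustrative sketch only: enabling vCPU profiling and reading back the
+ * counters later. `vcpu_cap` is an assumed capability; note that fetching the
+ * data resets the kernel's counters, as documented above.
+ *
+ *     struct okl4_schedule_profile_data prof;
+ *
+ *     struct _okl4_sys_schedule_profile_vcpu_enable_return en =
+ *             _okl4_sys_schedule_profile_vcpu_enable(vcpu_cap);
+ *     // en.timestamp records when profiling started
+ *
+ *     // ... let the system run, then sample:
+ *     okl4_error_t err = _okl4_sys_schedule_profile_vcpu_get_data(vcpu_cap,
+ *             &prof);
+ *     if (err == OKL4_ERROR_OK) {
+ *         // prof.cpu_time (us), prof.context_switches, prof.cpu_migrations
+ *     }
+ */
+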
+/**
+ *
+ * OKL4 Microvisor system call: SCHEDULER_SUSPEND
+ *
+ * @param scheduler_id
+ * @param power_state
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_scheduler_suspend(okl4_kcap_t scheduler_id,
+        okl4_power_state_t power_state)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)scheduler_id;
+    register uint32_t r1 asm("r1") = (uint32_t)power_state;
+    __asm__ __volatile__(
+            ""hvc(5150)"\n\t"
+            : "+r"(r0), "+r"(r1)
+            :
+            : "cc", "memory", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_scheduler_suspend(okl4_kcap_t scheduler_id,
+        okl4_power_state_t power_state)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)scheduler_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)power_state;
+    __asm__ __volatile__(
+            "" hvc(5150) "\n\t"
+            : "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Cancel an active timeout on a specified timer.
+ *
+ *    @details
+ *    This operation cancels an active timeout on a specified timer. The
+ *    operation returns the time that was remaining on the cancelled
+ *    timeout. If there was not an active timeout, the operation returns an
+ *    error.
+ *
+ *    The returned remaining time is formatted in the units requested by
+ *    the \p flags argument.
+ *
+ *    The operation also returns the \p old_flags field, indicating whether
+ *    the cancelled timeout was periodic or one-shot and whether it was an
+ *    absolute or relative timeout.
+ *
+ *    @par flags
+ *    - If the \p units flag is set, the remaining time is returned in units
+ *    of timer ticks. The length of a timer tick is KSP defined and may be
+ *    obtained with the @ref _okl4_sys_timer_get_resolution operation.
+ *    - If the \p units flag is not set, the remaining time is returned in
+ *    nanoseconds.
+ *
+ *    @par old_flags
+ *    - If the \p periodic flag is set, the cancelled timeout was periodic.
+ *    - If the \p periodic flag is not set, the cancelled timeout was
+ *    one-shot.
+ *    - If the \p absolute flag is set, the cancelled timeout was an
+ *    absolute time.
+ *    - If the \p absolute flag is not set, the cancelled timeout was a
+ *    relative time.
+ *
+ * @param timer
+ *    The target timer capability.
+ * @param flags
+ *    Flags for the requested operation.
+ *
+ * @retval remaining
+ *    Time that was remaining on the cancelled timeout.
+ * @retval old_flags
+ *    Flags relating to the cancelled timeout.
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_timer_cancel_return
+_okl4_sys_timer_cancel(okl4_kcap_t timer, okl4_timer_flags_t flags)
+{
+    typedef union {
+        struct uint64 {
+            uint32_t lo;
+            uint32_t hi;
+        } words;
+        uint64_t val;
+    } okl4_uint64_tmp;
+    okl4_uint64_tmp remaining_tmp;
+    struct _okl4_sys_timer_cancel_return result;
+
+    register uint32_t r0 asm("r0") = (uint32_t)timer;
+    register uint32_t r1 asm("r1") = (uint32_t)flags;
+    register uint32_t r2 asm("r2");
+    register uint32_t r3 asm("r3");
+    __asm__ __volatile__(
+            ""hvc(5176)"\n\t"
+            : "=r"(r2), "=r"(r3), "+r"(r0), "+r"(r1)
+            :
+            : "cc", "memory", "r4", "r5"
+            );
+
+
+    remaining_tmp.words.lo = r0;
+    remaining_tmp.words.hi = r1;
+    result.remaining = (uint64_t)(remaining_tmp.val);
+    result.old_flags = (okl4_timer_flags_t)(r2);
+    result.error = (okl4_error_t)(r3);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_timer_cancel_return
+_okl4_sys_timer_cancel(okl4_kcap_t timer, okl4_timer_flags_t flags)
+{
+    struct _okl4_sys_timer_cancel_return result;
+
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)timer;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)flags;
+    register okl4_register_t x2 asm("x2");
+    __asm__ __volatile__(
+            "" hvc(5176) "\n\t"
+            : "=r"(x2), "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.remaining = (uint64_t)(x0);
+    result.old_flags = (okl4_timer_flags_t)(x1);
+    result.error = (okl4_error_t)(x2);
+    return result;
+}
+
+#endif
+
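+/*
+ * Editor's usage sketch: cancelling an active timeout and recovering the
+ * time that was left on it.  Passing zero flags requests the remaining
+ * time in nanoseconds (the units flag is clear).  `timer_cap` is an
+ * assumed timer capability; `.error` reports whether a timeout was
+ * actually pending, and `.old_flags` describes the cancelled timeout.
+ */
+static inline uint64_t
+example_cancel_timeout_ns(okl4_kcap_t timer_cap)
+{
+    struct _okl4_sys_timer_cancel_return ret =
+            _okl4_sys_timer_cancel(timer_cap, (okl4_timer_flags_t)0);
+
+    return ret.remaining;
+}
+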
+/**
+ *
+ * @brief Query the timer frequency and obtain time conversion constants.
+ *
+ *    @details
+ *    This operation returns the timer frequency and the conversion constants
+ *    that may be used to convert between units of nanoseconds and units of
+ *    ticks.
+ *
+ *    The timer frequency is returned as a 64-bit value in units of
+ *    micro-hertz (1,000,000 micro-hertz = 1 Hz). The timer resolution (or
+ *    period) can be calculated from the frequency.
+ *
+ *    The time conversion constants are returned as values \p a and \p b,
+ *    which can be used for unit conversions as follows:
+ *    - ns = (ticks) * \p a / \p b
+ *    - ticks = (ns * \p b) / \p a
+ *
+ *    @note
+ *    The constants are provided by the KSP module and are designed to be
+ *    used for simple overflow-free computation using 64-bit arithmetic,
+ *    covering time values from 0 to 2 years.
+ *
+ * @param timer
+ *    The target timer capability.
+ *
+ * @retval tick_freq
+ *    The timer frequency [in units of micro-hertz].
+ * @retval a
+ *    Ticks to nanoseconds conversion multiplier.
+ * @retval b
+ *    Ticks to nanoseconds conversion divisor.
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_timer_get_resolution_return
+_okl4_sys_timer_get_resolution(okl4_kcap_t timer)
+{
+    typedef union {
+        struct uint64 {
+            uint32_t lo;
+            uint32_t hi;
+        } words;
+        uint64_t val;
+    } okl4_uint64_tmp;
+    okl4_uint64_tmp tick_freq_tmp;
+    struct _okl4_sys_timer_get_resolution_return result;
+
+    register uint32_t r0 asm("r0") = (uint32_t)timer;
+    register uint32_t r1 asm("r1");
+    register uint32_t r2 asm("r2");
+    register uint32_t r3 asm("r3");
+    register uint32_t r4 asm("r4");
+    __asm__ __volatile__(
+            ""hvc(5177)"\n\t"
+            : "=r"(r1), "=r"(r2), "=r"(r3), "=r"(r4), "+r"(r0)
+            :
+            : "cc", "memory", "r5"
+            );
+
+
+    tick_freq_tmp.words.lo = r0;
+    tick_freq_tmp.words.hi = r1;
+    result.tick_freq = (uint64_t)(tick_freq_tmp.val);
+    result.a = (uint32_t)(r2);
+    result.b = (uint32_t)(r3);
+    result.error = (okl4_error_t)(r4);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_timer_get_resolution_return
+_okl4_sys_timer_get_resolution(okl4_kcap_t timer)
+{
+    struct _okl4_sys_timer_get_resolution_return result;
+
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)timer;
+    register okl4_register_t x1 asm("x1");
+    register okl4_register_t x2 asm("x2");
+    register okl4_register_t x3 asm("x3");
+    __asm__ __volatile__(
+            "" hvc(5177) "\n\t"
+            : "=r"(x1), "=r"(x2), "=r"(x3), "+r"(x0)
+            :
+            : "cc", "memory", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.tick_freq = (uint64_t)(x0);
+    result.a = (uint32_t)(x1);
+    result.b = (uint32_t)(x2);
+    result.error = (okl4_error_t)(x3);
+    return result;
+}
+
+#endif
+
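+/*
+ * Editor's usage sketch: converting a tick count to nanoseconds with the
+ * constants returned by the wrapper above, using the documented formula
+ * ns = ticks * a / b.  `timer_cap` is an assumed timer capability and
+ * error handling is elided for brevity.
+ */
+static inline uint64_t
+example_ticks_to_ns(okl4_kcap_t timer_cap, uint64_t ticks)
+{
+    struct _okl4_sys_timer_get_resolution_return res =
+            _okl4_sys_timer_get_resolution(timer_cap);
+
+    /* KSP-provided constants, chosen to avoid 64-bit overflow. */
+    return (ticks * (uint64_t)res.a) / (uint64_t)res.b;
+}
+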
+/**
+ *
+ * @brief Query the current system time.
+ *
+ *    @details
+ *    This operation returns the current absolute system time. The \p flags
+ *    argument is used to specify the desired units for the return value.
+ *
+ *    - Absolute time is based on an arbitrary time zero, defined to be at
+ *    or before the time of boot.
+ *
+ *    @par flags
+ *    - If the \p units flag is set, the time is returned in units
+ *    of timer ticks. The length of a timer tick is KSP defined and may
+ *    be obtained with the @ref _okl4_sys_timer_get_resolution operation.
+ *    - If the \p units flag is not set, the time is returned in
+ *    terms of nanoseconds.
+ *
+ * @param timer
+ *    The target timer capability.
+ * @param flags
+ *    Flags for the requested operation.
+ *
+ * @retval time
+ *    The current system time.
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_timer_get_time_return
+_okl4_sys_timer_get_time(okl4_kcap_t timer, okl4_timer_flags_t flags)
+{
+    typedef union {
+        struct uint64 {
+            uint32_t lo;
+            uint32_t hi;
+        } words;
+        uint64_t val;
+    } okl4_uint64_tmp;
+    okl4_uint64_tmp time_tmp;
+    struct _okl4_sys_timer_get_time_return result;
+
+    register uint32_t r0 asm("r0") = (uint32_t)timer;
+    register uint32_t r1 asm("r1") = (uint32_t)flags;
+    register uint32_t r2 asm("r2");
+    __asm__ __volatile__(
+            ""hvc(5178)"\n\t"
+            : "=r"(r2), "+r"(r0), "+r"(r1)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    time_tmp.words.lo = r0;
+    time_tmp.words.hi = r1;
+    result.time = (uint64_t)(time_tmp.val);
+    result.error = (okl4_error_t)(r2);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_timer_get_time_return
+_okl4_sys_timer_get_time(okl4_kcap_t timer, okl4_timer_flags_t flags)
+{
+    struct _okl4_sys_timer_get_time_return result;
+
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)timer;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)flags;
+    __asm__ __volatile__(
+            "" hvc(5178) "\n\t"
+            : "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.time = (uint64_t)(x0);
+    result.error = (okl4_error_t)(x1);
+    return result;
+}
+
+#endif
+
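+/*
+ * Editor's usage sketch: reading the current absolute system time in
+ * nanoseconds.  Zero flags leave the units flag clear, so the result is
+ * in nanoseconds per the description above.  `timer_cap` is an assumed
+ * capability; callers should also check `.error`.
+ */
+static inline uint64_t
+example_current_time_ns(okl4_kcap_t timer_cap)
+{
+    struct _okl4_sys_timer_get_time_return now =
+            _okl4_sys_timer_get_time(timer_cap, (okl4_timer_flags_t)0);
+
+    return now.time;
+}
+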
+/**
+ *
+ * @brief Query a timer about an active timeout.
+ *
+ *    @details
+ *    The operation queries a timer about an active timeout. If there is no
+ *    active timeout, this operation returns an error.
+ *
+ *    If the timer has an active timeout, this operation returns the
+ *    remaining time and the flags associated with the timeout. The
+ *    remaining time is returned in the units requested by the \p flags
+ *    argument.
+ *
+ *    The operation also returns the \p active_flags field, indicating
+ *    whether the active timeout is periodic or one-shot and whether it is
+ *    an absolute or relative timeout.
+ *
+ *    @par flags
+ *    - If the \p units flag is set, the remaining time is returned in units
+ *    of timer ticks. The length of a timer tick is KSP defined and may
+ *    be obtained with the @ref _okl4_sys_timer_get_resolution operation.
+ *    - If the \p units flag is not set, the remaining time is returned in
+ *    units of nanoseconds.
+ *
+ *    @par active_flags
+ *    - If the \p periodic flag is set, the timeout is periodic.
+ *    - If the \p periodic flag is not set, the timeout is one-shot.
+ *    - If the \p absolute flag is set, the timeout is an absolute time.
+ *    - If the \p absolute flag is not set, the timeout is a relative time.
+ *
+ * @param timer
+ *    The target timer capability.
+ * @param flags
+ *    Flags for the requested operation.
+ *
+ * @retval remaining
+ *    Time remaining before the next timeout.
+ * @retval active_flags
+ *    Flags relating to the active timeout.
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_timer_query_return
+_okl4_sys_timer_query(okl4_kcap_t timer, okl4_timer_flags_t flags)
+{
+    typedef union {
+        struct uint64 {
+            uint32_t lo;
+            uint32_t hi;
+        } words;
+        uint64_t val;
+    } okl4_uint64_tmp;
+    okl4_uint64_tmp remaining_tmp;
+    struct _okl4_sys_timer_query_return result;
+
+    register uint32_t r0 asm("r0") = (uint32_t)timer;
+    register uint32_t r1 asm("r1") = (uint32_t)flags;
+    register uint32_t r2 asm("r2");
+    register uint32_t r3 asm("r3");
+    __asm__ __volatile__(
+            ""hvc(5179)"\n\t"
+            : "=r"(r2), "=r"(r3), "+r"(r0), "+r"(r1)
+            :
+            : "cc", "memory", "r4", "r5"
+            );
+
+
+    remaining_tmp.words.lo = r0;
+    remaining_tmp.words.hi = r1;
+    result.remaining = (uint64_t)(remaining_tmp.val);
+    result.active_flags = (okl4_timer_flags_t)(r2);
+    result.error = (okl4_error_t)(r3);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_timer_query_return
+_okl4_sys_timer_query(okl4_kcap_t timer, okl4_timer_flags_t flags)
+{
+    struct _okl4_sys_timer_query_return result;
+
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)timer;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)flags;
+    register okl4_register_t x2 asm("x2");
+    __asm__ __volatile__(
+            "" hvc(5179) "\n\t"
+            : "=r"(x2), "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.remaining = (uint64_t)(x0);
+    result.active_flags = (okl4_timer_flags_t)(x1);
+    result.error = (okl4_error_t)(x2);
+    return result;
+}
+
+#endif
+
+/**
+ *
+ * @brief Start a timer with a specified timeout.
+ *
+ *    @details
+ *    This operation optionally resets, then starts, a timer with a new
+ *    timeout. The specified timeout may be an `absolute` or `relative`
+ *    time, may be `one-shot` or `periodic`, and may be specified in units
+ *    of nanoseconds or ticks.
+ *
+ *    @par flags
+ *    - If the \p absolute flag is set, the timeout is treated as an
+ *    absolute time based on an arbitrary time zero, defined to be at or
+ *    before the time of boot.
+ *    - If the \p absolute flag is not set, the timeout is treated as a
+ *    relative time, a specified amount of time into the future, e.g. 10ms
+ *    from now.
+ *    - If the \p periodic flag is set, the timeout is treated as a periodic
+ *    timeout that repeats with a period equal to the specified timeout.
+ *    - If the \p periodic flag is not set, the timeout is treated as a
+ *    one-shot timeout that expires at the specified time and does not
+ *    repeat.
+ *    - If the \p units flag is set, the timeout is specified in units of
+ *    timer ticks. The length of a timer tick is KSP defined and may be
+ *    obtained with the @ref _okl4_sys_timer_get_resolution operation.
+ *    - If the \p units flag is not set, the timeout is specified in units
+ *    of nanoseconds.
+ *    - The \p reload flag allows an active timeout to be cancelled and the
+ *    new timeout to be programmed into the timer.
+ *
+ * @param timer
+ *    The target timer capability.
+ * @param timeout
+ *    The timeout value.
+ * @param flags
+ *    Flags for the requested operation.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_timer_start(okl4_kcap_t timer, uint64_t timeout,
+        okl4_timer_flags_t flags)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)timer;
+    register uint32_t r1 asm("r1") = (uint32_t)(timeout        & 0xffffffff);
+    register uint32_t r2 asm("r2") = (uint32_t)((timeout >> 32) & 0xffffffff);
+    register uint32_t r3 asm("r3") = (uint32_t)flags;
+    __asm__ __volatile__(
+            ""hvc(5180)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3)
+            :
+            : "cc", "memory", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_timer_start(okl4_kcap_t timer, uint64_t timeout,
+        okl4_timer_flags_t flags)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)timer;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)timeout;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)flags;
+    __asm__ __volatile__(
+            "" hvc(5180) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
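+/*
+ * Editor's usage sketch: arming a one-shot, relative timeout.  With all
+ * flags clear the timeout is relative, one-shot and expressed in
+ * nanoseconds, per the flag descriptions above.  `timer_cap` is an
+ * assumed capability supplied by the caller.
+ */
+static inline okl4_error_t
+example_arm_oneshot_ns(okl4_kcap_t timer_cap, uint64_t delay_ns)
+{
+    return _okl4_sys_timer_start(timer_cap, delay_ns, (okl4_timer_flags_t)0);
+}
+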
+/**
+ *
+ * OKL4 Microvisor system call: TRACEBUFFER_SYNC
+ *
+ *
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE void
+_okl4_sys_tracebuffer_sync(void)
+{
+    __asm__ __volatile__(
+            ""hvc(5199)"\n\t"
+            :
+            :
+            : "cc", "memory", "r0", "r1", "r2", "r3", "r4", "r5"
+            );
+
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE void
+_okl4_sys_tracebuffer_sync(void)
+{
+    __asm__ __volatile__(
+            "" hvc(5199) "\n\t"
+            :
+            :
+            : "cc", "memory", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+}
+
+#endif
+
+/**
+ *
+ * @brief Reset a vCPU.
+ *
+ *    @details
+ *    This operation resets a vCPU to its boot state.
+ *
+ * @param vcpu
+ *    The target vCPU capability.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_vcpu_reset(okl4_kcap_t vcpu)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)vcpu;
+    __asm__ __volatile__(
+            ""hvc(5122)"\n\t"
+            : "+r"(r0)
+            :
+            : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_vcpu_reset(okl4_kcap_t vcpu)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)vcpu;
+    __asm__ __volatile__(
+            "" hvc(5122) "\n\t"
+            : "+r"(x0)
+            :
+            : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Start a vCPU executing.
+ *
+ *    @details
+ *    This operation starts a stopped vCPU, optionally at a specified
+ *    instruction pointer. If the instruction pointer is not set, the value
+ *    from the previous stop is preserved.
+ *
+ * @param vcpu
+ *    The target vCPU capability.
+ * @param set_ip
+ *    Should the instruction pointer be set.
+ * @param ip
+ *    Instruction pointer to start the vCPU at.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_vcpu_start(okl4_kcap_t vcpu, okl4_bool_t set_ip, void *ip)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)vcpu;
+    register uint32_t r1 asm("r1") = (uint32_t)set_ip;
+    register uint32_t r2 asm("r2") = (uint32_t)(uintptr_t)ip;
+    __asm__ __volatile__(
+            ""hvc(5123)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_vcpu_start(okl4_kcap_t vcpu, okl4_bool_t set_ip, void *ip)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)vcpu;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)set_ip;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)(uintptr_t)ip;
+    __asm__ __volatile__(
+            "" hvc(5123) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
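+/*
+ * Editor's usage sketch: starting a stopped vCPU at a fresh entry point,
+ * or resuming it without touching its instruction pointer.  `vcpu_cap`
+ * and `entry` are assumed values supplied by the caller.
+ */
+static inline okl4_error_t
+example_start_vcpu_at(okl4_kcap_t vcpu_cap, void *entry)
+{
+    /* set_ip == true: begin execution at `entry`. */
+    return _okl4_sys_vcpu_start(vcpu_cap, (okl4_bool_t)1, entry);
+}
+
+static inline okl4_error_t
+example_resume_vcpu(okl4_kcap_t vcpu_cap)
+{
+    /* set_ip == false: resume from wherever the vCPU last stopped. */
+    return _okl4_sys_vcpu_start(vcpu_cap, (okl4_bool_t)0, (void *)0);
+}
+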
+/**
+ *
+ * @brief Stop a vCPU executing.
+ *
+ *    @details
+ *    This operation stops a vCPU's execution until it is next restarted.
+ *
+ * @param vcpu
+ *    The target vCPU capability.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_vcpu_stop(okl4_kcap_t vcpu)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)vcpu;
+    __asm__ __volatile__(
+            ""hvc(5124)"\n\t"
+            : "+r"(r0)
+            :
+            : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_vcpu_stop(okl4_kcap_t vcpu)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)vcpu;
+    __asm__ __volatile__(
+            "" hvc(5124) "\n\t"
+            : "+r"(x0)
+            :
+            : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Switch a vCPU's execution mode between 32-bit and 64-bit.
+ *
+ *    @details
+ *    This operation resets a vCPU to its boot state, switches between
+ *    32-bit and 64-bit modes, and restarts execution at the specified
+ *    address. The start address must be valid in the vCPU's initial
+ *    address space, which may not be the same as the caller's address
+ *    space.
+ *
+ * @param vcpu
+ *    The target vCPU capability.
+ * @param to_64bit
+ *    The vCPU will reset in 64-bit mode if true; otherwise in 32-bit mode.
+ * @param set_ip
+ *    Should the instruction pointer be set.
+ * @param ip
+ *    Instruction pointer to start the vCPU at.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_vcpu_switch_mode(okl4_kcap_t vcpu, okl4_bool_t to_64bit,
+        okl4_bool_t set_ip, void *ip)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)vcpu;
+    register uint32_t r1 asm("r1") = (uint32_t)to_64bit;
+    register uint32_t r2 asm("r2") = (uint32_t)set_ip;
+    register uint32_t r3 asm("r3") = (uint32_t)(uintptr_t)ip;
+    __asm__ __volatile__(
+            ""hvc(5125)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3)
+            :
+            : "cc", "memory", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_vcpu_switch_mode(okl4_kcap_t vcpu, okl4_bool_t to_64bit,
+        okl4_bool_t set_ip, void *ip)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)vcpu;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)to_64bit;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)set_ip;
+    register okl4_register_t x3 asm("x3") = (okl4_register_t)(uintptr_t)ip;
+    __asm__ __volatile__(
+            "" hvc(5125) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3)
+            :
+            : "cc", "memory", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Signal a synchronization event.
+ *
+ *    @details
+ *    This operation sets the wakeup flags for all vCPUs in the caller's
+ *    domain. If any vCPUs in the domain are waiting due to a pending
+ *    `sync_wfe` operation, they will be released from the wait. The OKL4
+ *    scheduler will then determine which vCPUs should execute first, based
+ *    on their priority.
+ *
+ *    The `sync_sev` operation is non-blocking and is used to signal other
+ *    vCPUs about some user-defined event. A typical use of this operation
+ *    is to signal the release of a spinlock to other waiting vCPUs.
+ *
+ *    @see _okl4_sys_vcpu_sync_wfe
+ *
+ *
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE void
+_okl4_sys_vcpu_sync_sev(void)
+{
+    __asm__ __volatile__(
+            ""hvc(5126)"\n\t"
+            :
+            :
+            : "cc", "memory", "r0", "r1", "r2", "r3", "r4", "r5"
+            );
+
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE void
+_okl4_sys_vcpu_sync_sev(void)
+{
+    __asm__ __volatile__(
+            "" hvc(5126) "\n\t"
+            :
+            :
+            : "cc", "memory", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+}
+
+#endif
+
+/**
+ *
+ * @brief Wait for a synchronization event.
+ *
+ *    @details
+ *    This operation is used to defer the execution of a vCPU while it is
+ *    waiting for an event. This operation is non-blocking, in that if no
+ *    other vCPUs in the system are runnable, the operation will complete
+ *    and the vCPU is not blocked. The `sync_wfe` operation uses the
+ *    \p holder argument as a hint identifying the vCPU the caller is
+ *    waiting on.
+ *
+ *    This operation first determines whether there is a pending wakeup
+ *    flag set for the calling vCPU. If the flag is set, the operation
+ *    clears the flag and returns immediately. If the caller has provided a
+ *    valid \p holder id, and the holder is currently executing on a
+ *    different physical core, the operation again returns immediately.
+ *
+ *    In all other cases, the Microvisor records that the vCPU is waiting
+ *    and temporarily reduces the vCPU's priority to the lowest priority in
+ *    the system. The scheduler is then invoked to rebalance the system.
+ *
+ *    A waiting vCPU will continue execution and return from the `sync_wfe`
+ *    operation as soon as no higher-priority vCPUs in the system are
+ *    available for scheduling, or a wake-up event is signalled by another
+ *    vCPU in the same domain.
+ *
+ *    @par holder
+ *    The holder identifier may be a valid capability to another vCPU, or
+ *    an invalid id. If the provided id is valid, it is used as a hint to
+ *    the Microvisor that the caller is waiting on the specified vCPU. The
+ *    `vcpu_sync` API is optimized for short spinlock-type use-cases and
+ *    will therefore allow the caller to continue execution without
+ *    waiting, if the target \p holder vCPU is presently running on another
+ *    physical core. This is done to reduce latency, with the expectation
+ *    that the holder vCPU will soon release the lock.
+ *
+ *    @see _okl4_sys_vcpu_sync_sev
+ *
+ * @param holder
+ *    Capability of the vCPU to wait for, or an invalid designator.
+ *
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE void
+_okl4_sys_vcpu_sync_wfe(okl4_kcap_t holder)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)holder;
+    __asm__ __volatile__(
+            ""hvc(5127)"\n\t"
+            : "+r"(r0)
+            :
+            : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+            );
+
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE void
+_okl4_sys_vcpu_sync_wfe(okl4_kcap_t holder)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)holder;
+    __asm__ __volatile__(
+            "" hvc(5127) "\n\t"
+            : "+r"(x0)
+            :
+            : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+}
+
+#endif
+
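+/*
+ * Editor's usage sketch: the spinlock pattern suggested above, built from
+ * sync_wfe/sync_sev.  The lock word and the GCC-style __atomic builtins
+ * are assumptions of this sketch, not part of the Microvisor API;
+ * `holder_cap` is the capability of the vCPU believed to hold the lock
+ * (or an invalid capability if unknown).
+ */
+static inline void
+example_spin_lock(volatile unsigned int *lock, okl4_kcap_t holder_cap)
+{
+    while (__atomic_exchange_n(lock, 1U, __ATOMIC_ACQUIRE) != 0U) {
+        /* Defer to other vCPUs while the holder keeps the lock. */
+        _okl4_sys_vcpu_sync_wfe(holder_cap);
+    }
+}
+
+static inline void
+example_spin_unlock(volatile unsigned int *lock)
+{
+    __atomic_store_n(lock, 0U, __ATOMIC_RELEASE);
+    /* Wake any vCPUs in the domain waiting in sync_wfe. */
+    _okl4_sys_vcpu_sync_sev();
+}
+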
+/**
+ *
+ * @brief Atomically fetch an interrupt payload and raise a virtual interrupt.
+ *
+ *    @details
+ *    This API is equivalent to atomically calling
+ *    @ref sys_interrupt_get_payload and @ref sys_vinterrupt_modify.
+ *    Typically, the specified virtual interrupt will be one that is not
+ *    attached to the specified virtual interrupt source, but this is not
+ *    enforced. If only one virtual interrupt source is affected, then the
+ *    @ref sys_interrupt_get_payload phase will occur first.
+ *
+ *    Certain communication protocols must perform this sequence of
+ *    operations atomically in order to maintain consistency. Other than
+ *    being atomic, this is no different to invoking the two component
+ *    operations separately.
+ *
+ * @param irq
+ *    An interrupt line number for the virtual GIC.
+ * @param virqline
+ *    A virtual interrupt line capability.
+ * @param mask
+ *    A machine-word-sized array of payload flags to preserve.
+ * @param payload
+ *    A machine-word-sized array of payload flags to set.
+ *
+ * @retval error
+ *    The resulting error value.
+ * @retval payload
+ *    Accumulated virtual interrupt payload flags.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_vinterrupt_clear_and_raise_return
+_okl4_sys_vinterrupt_clear_and_raise(okl4_interrupt_number_t irq,
+        okl4_kcap_t virqline, okl4_virq_flags_t mask, okl4_virq_flags_t payload)
+{
+    typedef union {
+        struct uint64 {
+            uint32_t lo;
+            uint32_t hi;
+        } words;
+        uint64_t val;
+    } okl4_uint64_tmp;
+    okl4_uint64_tmp payload_tmp;
+    struct _okl4_sys_vinterrupt_clear_and_raise_return result;
+
+    register uint32_t r0 asm("r0") = (uint32_t)irq;
+    register uint32_t r1 asm("r1") = (uint32_t)virqline;
+    register uint32_t r2 asm("r2") = (uint32_t)(mask        & 0xffffffff);
+    register uint32_t r3 asm("r3") = (uint32_t)((mask >> 32) & 0xffffffff);
+    register uint32_t r4 asm("r4") = (uint32_t)(payload        & 0xffffffff);
+    register uint32_t r5 asm("r5") = (uint32_t)((payload >> 32) & 0xffffffff);
+    __asm__ __volatile__(
+            ""hvc(5194)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4), "+r"(r5)
+            :
+            : "cc", "memory"
+            );
+
+
+    result.error = (okl4_error_t)(r0);
+    payload_tmp.words.lo = r1;
+    payload_tmp.words.hi = r2;
+    result.payload = (okl4_virq_flags_t)(payload_tmp.val);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_vinterrupt_clear_and_raise_return
+_okl4_sys_vinterrupt_clear_and_raise(okl4_interrupt_number_t irq,
+        okl4_kcap_t virqline, okl4_virq_flags_t mask, okl4_virq_flags_t payload)
+{
+    struct _okl4_sys_vinterrupt_clear_and_raise_return result;
+
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)irq;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)virqline;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)mask;
+    register okl4_register_t x3 asm("x3") = (okl4_register_t)payload;
+    __asm__ __volatile__(
+            "" hvc(5194) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3)
+            :
+            : "cc", "memory", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.error = (okl4_error_t)(x0);
+    result.payload = (okl4_virq_flags_t)(x1);
+    return result;
+}
+
+#endif
+
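+/*
+ * Editor's usage sketch: atomically consuming the payload of an incoming
+ * virtual interrupt while raising a flag on an outgoing virtual interrupt
+ * line.  `irq_num`, `out_line` and the flag value are assumptions of this
+ * sketch; an all-ones mask preserves flags already pending on the
+ * outgoing line, matching plain raise semantics.
+ */
+static inline okl4_virq_flags_t
+example_ack_and_signal(okl4_interrupt_number_t irq_num, okl4_kcap_t out_line)
+{
+    struct _okl4_sys_vinterrupt_clear_and_raise_return ret =
+            _okl4_sys_vinterrupt_clear_and_raise(irq_num, out_line,
+                    ~(okl4_virq_flags_t)0, (okl4_virq_flags_t)1);
+
+    /* ret.payload holds the flags fetched from `irq_num`. */
+    return ret.payload;
+}
+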
+/**
+ *
+ * @brief Raise a virtual interrupt, and modify the payload flags.
+ *
+ *    @details
+ *    This triggers a virtual interrupt by raising a virtual interrupt
+ *    source. A virtual interrupt source object is distinct from a virtual
+ *    interrupt. A virtual interrupt source is always linked to a virtual
+ *    interrupt, but the reverse is not true.
+ *
+ *    Each Microvisor virtual interrupt carries a payload of flags which
+ *    may be fetched by the recipient of the interrupt. An interrupt
+ *    payload is a @ref okl4_word_t sized array of flags, packed into a
+ *    single word. Flags are cleared whenever the interrupt recipient
+ *    fetches the payload with the @ref okl4_sys_interrupt_get_payload API.
+ *
+ *    The interrupt-modify API allows the caller to pass in a new set of
+ *    flags in the \p payload field, and a set of flags to keep from the
+ *    previous payload in the \p mask field. If the interrupt has
+ *    previously been raised and not yet delivered, the flags accumulate
+ *    with a mask; that is, each flag is the boolean OR of the specified
+ *    value with the boolean AND of its previous value and the mask.
+ *
+ *    When the recipient has configured the interrupt for edge triggering,
+ *    an invocation of this API is counted as a single edge; this triggers
+ *    interrupt delivery if the interrupt is not already pending,
+ *    irrespective of the payload. If the interrupt is configured for level
+ *    triggering, then its pending state is the boolean OR of its payload
+ *    flags after any specified flags are cleared or raised; at least one
+ *    flag must be set in the new payload to permit delivery of a
+ *    level-triggered interrupt.
+ *
+ * @param virqline
+ *    A virtual interrupt line capability.
+ * @param mask
+ *    A machine-word-sized array of payload flags to preserve.
+ * @param payload
+ *    A machine-word-sized array of payload flags to set.
+ *
+ * @retval error
+ *    The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_vinterrupt_modify(okl4_kcap_t virqline, okl4_virq_flags_t mask,
+        okl4_virq_flags_t payload)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)virqline;
+    register uint32_t r1 asm("r1") = (uint32_t)(mask        & 0xffffffff);
+    register uint32_t r2 asm("r2") = (uint32_t)((mask >> 32) & 0xffffffff);
+    register uint32_t r3 asm("r3") = (uint32_t)(payload        & 0xffffffff);
+    register uint32_t r4 asm("r4") = (uint32_t)((payload >> 32) & 0xffffffff);
+    __asm__ __volatile__(
+            ""hvc(5195)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4)
+            :
+            : "cc", "memory", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_vinterrupt_modify(okl4_kcap_t virqline, okl4_virq_flags_t mask,
+        okl4_virq_flags_t payload)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)virqline;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)mask;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)payload;
+    __asm__ __volatile__(
+            "" hvc(5195) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
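+/*
+ * Editor's usage sketch: raising payload flag bit 0 on a virtual interrupt
+ * line while discarding any previously accumulated flag bit 1.  The bit
+ * assignments are assumptions of this sketch; the resulting payload is
+ * (old_payload & mask) | payload, as described above.
+ */
+static inline okl4_error_t
+example_update_virq(okl4_kcap_t out_line)
+{
+    okl4_virq_flags_t mask = ~(okl4_virq_flags_t)2;    /* drop old bit 1 */
+    okl4_virq_flags_t payload = (okl4_virq_flags_t)1;  /* raise bit 0 */
+
+    return _okl4_sys_vinterrupt_modify(out_line, mask, payload);
+}
+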
+/**
+ *
+ * @brief Raise a virtual interrupt, setting specified payload flags.
+ *
+ *    @details
+ *    This triggers a virtual interrupt by raising a virtual interrupt
+ *    source. A virtual interrupt source object is distinct from a virtual
+ *    interrupt. A virtual interrupt source is always linked to a virtual
+ *    interrupt, but the reverse is not true.
+ *
+ *    Each Microvisor virtual interrupt carries a payload of flags which
+ *    may be fetched by the recipient of the interrupt. An interrupt
+ *    payload is a @ref okl4_word_t sized array of flags, packed into a
+ *    single word. Flags are cleared whenever the interrupt recipient
+ *    fetches the payload with the @ref okl4_sys_interrupt_get_payload API.
+ *
+ *    The interrupt-raise API allows the caller to pass in a new set of
+ *    flags in the \p payload field. If the interrupt has previously been
+ *    raised and not yet delivered, the flags accumulate; that is, each
+ *    flag is the boolean OR of its previous value and the specified value.
+ *
+ *    When the recipient has configured the interrupt for edge triggering,
+ *    an invocation of this API is counted as a single edge; this triggers
+ *    interrupt delivery if the interrupt is not already pending,
+ *    irrespective of the payload. If the interrupt is configured for level
+ *    triggering, then its pending state is the boolean OR of its payload
+ *    flags after any specified flags are raised; at least one flag must be
+ *    set in the new payload to permit delivery of a level-triggered
+ *    interrupt.
+ *
+ *    @note Invoking this API is equivalent to invoking the @ref
+ *    okl4_sys_vinterrupt_modify API with all bits set in the \p mask
+ *    value.
+ *
+ *    @note This API is distinct from the @ref okl4_sys_interrupt_raise
+ *    API, which raises a local software-generated interrupt without
+ *    requiring an explicit capability.
+ *
+ * @param virqline
+ *    A virtual interrupt line capability.
+ * @param payload
+ *    A machine-word-sized array of payload flags to set.
+ *
+ * @retval error
+ *    The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_vinterrupt_raise(okl4_kcap_t virqline, okl4_virq_flags_t payload)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)virqline;
+    register uint32_t r1 asm("r1") = (uint32_t)(payload        & 0xffffffff);
+    register uint32_t r2 asm("r2") = (uint32_t)((payload >> 32) & 0xffffffff);
+    __asm__ __volatile__(
+            ""hvc(5196)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_vinterrupt_raise(okl4_kcap_t virqline, okl4_virq_flags_t payload)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)virqline;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)payload;
+    __asm__ __volatile__(
+            "" hvc(5196) "\n\t"
+            : "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
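+/*
+ * Editor's usage sketch: signalling an outgoing virtual interrupt line
+ * with a single payload flag.  This is equivalent to the modify call with
+ * an all-ones mask; the flag value is an assumption of this sketch.
+ */
+static inline okl4_error_t
+example_signal_virq(okl4_kcap_t out_line)
+{
+    return _okl4_sys_vinterrupt_raise(out_line, (okl4_virq_flags_t)1);
+}
+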
+
+/*lint -restore */
+
+#endif /* !ASSEMBLY */
+
+/*
+ * Assembly system call prototypes / numbers.
+ */
+
+/** @addtogroup lib_microvisor_syscall_numbers Microvisor System Call Numbers
+ * @{
+ */
+#define OKL4_SYSCALL_AXON_PROCESS_RECV 5184
+
+#define OKL4_SYSCALL_AXON_SET_HALTED 5186
+
+#define OKL4_SYSCALL_AXON_SET_RECV_AREA 5187
+
+#define OKL4_SYSCALL_AXON_SET_RECV_QUEUE 5188
+
+#define OKL4_SYSCALL_AXON_SET_RECV_SEGMENT 5189
+
+#define OKL4_SYSCALL_AXON_SET_SEND_AREA 5190
+
+#define OKL4_SYSCALL_AXON_SET_SEND_QUEUE 5191
+
+#define OKL4_SYSCALL_AXON_SET_SEND_SEGMENT 5192
+
+#define OKL4_SYSCALL_AXON_TRIGGER_SEND 5185
+
+#define OKL4_SYSCALL_INTERRUPT_ACK 5128
+
+#define OKL4_SYSCALL_INTERRUPT_ATTACH_PRIVATE 5134
+
+#define OKL4_SYSCALL_INTERRUPT_ATTACH_SHARED 5135
+
+#define OKL4_SYSCALL_INTERRUPT_DETACH 5136
+
+#define OKL4_SYSCALL_INTERRUPT_DIST_ENABLE 5133
+
+#define OKL4_SYSCALL_INTERRUPT_EOI 5129
+
+#define OKL4_SYSCALL_INTERRUPT_GET_HIGHEST_PRIORITY_PENDING 5137
+
+#define OKL4_SYSCALL_INTERRUPT_GET_PAYLOAD 5132
+
+#define OKL4_SYSCALL_INTERRUPT_LIMITS 5138
+
+#define OKL4_SYSCALL_INTERRUPT_MASK 5130
+
+#define OKL4_SYSCALL_INTERRUPT_RAISE 5145
+
+#define OKL4_SYSCALL_INTERRUPT_SET_BINARY_POINT 5139
+
+#define OKL4_SYSCALL_INTERRUPT_SET_CONFIG 5140
+
+#define OKL4_SYSCALL_INTERRUPT_SET_CONTROL 5141
+
+#define OKL4_SYSCALL_INTERRUPT_SET_PRIORITY 5142
+
+#define OKL4_SYSCALL_INTERRUPT_SET_PRIORITY_MASK 5143
+
+#define OKL4_SYSCALL_INTERRUPT_SET_TARGETS 5144
+
+#define OKL4_SYSCALL_INTERRUPT_UNMASK 5131
+
+#define OKL4_SYSCALL_KDB_INTERACT 5120
+
+#define OKL4_SYSCALL_KDB_SET_OBJECT_NAME 5121
+
+#define OKL4_SYSCALL_KSP_PROCEDURE_CALL 5197
+
+#define OKL4_SYSCALL_MMU_ATTACH_SEGMENT 5152
+
+#define OKL4_SYSCALL_MMU_DETACH_SEGMENT 5153
+
+#define OKL4_SYSCALL_MMU_FLUSH_RANGE 5154
+
+#define OKL4_SYSCALL_MMU_FLUSH_RANGE_PN 5155
+
+#define OKL4_SYSCALL_MMU_LOOKUP_PAGE 5156
+
+#define OKL4_SYSCALL_MMU_LOOKUP_PN 5157
+
+#define OKL4_SYSCALL_MMU_MAP_PAGE 5158
+
+#define OKL4_SYSCALL_MMU_MAP_PN 5159
+
+#define OKL4_SYSCALL_MMU_UNMAP_PAGE 5160
+
+#define OKL4_SYSCALL_MMU_UNMAP_PN 5161
+
+#define OKL4_SYSCALL_MMU_UPDATE_PAGE_ATTRS 5162
+
+#define OKL4_SYSCALL_MMU_UPDATE_PAGE_PERMS 5163
+
+#define OKL4_SYSCALL_MMU_UPDATE_PN_ATTRS 5164
+
+#define OKL4_SYSCALL_MMU_UPDATE_PN_PERMS 5165
+
+#define OKL4_SYSCALL_PERFORMANCE_NULL_SYSCALL 5198
+
+#define OKL4_SYSCALL_PIPE_CONTROL 5146
+
+#define OKL4_SYSCALL_PIPE_RECV 5147
+
+#define OKL4_SYSCALL_PIPE_SEND 5148
+
+#define OKL4_SYSCALL_PRIORITY_WAIVE 5151
+
+#define OKL4_SYSCALL_REMOTE_GET_REGISTER 5200
+
+#define OKL4_SYSCALL_REMOTE_GET_REGISTERS 5201
+
+#define OKL4_SYSCALL_REMOTE_READ_MEMORY32 5202
+
+#define OKL4_SYSCALL_REMOTE_SET_REGISTER 5203
+
+#define OKL4_SYSCALL_REMOTE_SET_REGISTERS 5204
+
+#define OKL4_SYSCALL_REMOTE_WRITE_MEMORY32 5205
+
+#define OKL4_SYSCALL_SCHEDULE_METRICS_STATUS_SUSPENDED 5206
+
+#define OKL4_SYSCALL_SCHEDULE_METRICS_WATCH_SUSPENDED 5207
+
+#define OKL4_SYSCALL_SCHEDULE_PROFILE_CPU_DISABLE 5168
+
+#define OKL4_SYSCALL_SCHEDULE_PROFILE_CPU_ENABLE 5169
+
+#define OKL4_SYSCALL_SCHEDULE_PROFILE_CPU_GET_DATA 5170
+
+#define OKL4_SYSCALL_SCHEDULE_PROFILE_VCPU_DISABLE 5171
+
+#define OKL4_SYSCALL_SCHEDULE_PROFILE_VCPU_ENABLE 5172
+
+#define OKL4_SYSCALL_SCHEDULE_PROFILE_VCPU_GET_DATA 5173
+
+#define OKL4_SYSCALL_SCHEDULER_SUSPEND 5150
+
+#define OKL4_SYSCALL_TIMER_CANCEL 5176
+
+#define OKL4_SYSCALL_TIMER_GET_RESOLUTION 5177
+
+#define OKL4_SYSCALL_TIMER_GET_TIME 5178
+
+#define OKL4_SYSCALL_TIMER_QUERY 5179
+
+#define OKL4_SYSCALL_TIMER_START 5180
+
+#define OKL4_SYSCALL_TRACEBUFFER_SYNC 5199
+
+#define OKL4_SYSCALL_VCPU_RESET 5122
+
+#define OKL4_SYSCALL_VCPU_START 5123
+
+#define OKL4_SYSCALL_VCPU_STOP 5124
+
+#define OKL4_SYSCALL_VCPU_SWITCH_MODE 5125
+
+#define OKL4_SYSCALL_VCPU_SYNC_SEV 5126
+
+#define OKL4_SYSCALL_VCPU_SYNC_WFE 5127
+
+#define OKL4_SYSCALL_VINTERRUPT_CLEAR_AND_RAISE 5194
+
+#define OKL4_SYSCALL_VINTERRUPT_MODIFY 5195
+
+#define OKL4_SYSCALL_VINTERRUPT_RAISE 5196
+
+/** @} */
+#undef hvc
+
+#if defined(_definitions_for_linters)
+/* Ignore lint identifier clashes for syscall names. */
+/*lint -esym(621, _okl4_sys_axon_process_recv) */
+/*lint -esym(621, _okl4_sys_axon_set_halted) */
+/*lint -esym(621, _okl4_sys_axon_set_recv_area) */
+/*lint -esym(621, _okl4_sys_axon_set_recv_queue) */
+/*lint -esym(621, _okl4_sys_axon_set_recv_segment) */
+/*lint -esym(621, _okl4_sys_axon_set_send_area) */
+/*lint -esym(621, _okl4_sys_axon_set_send_queue) */
+/*lint -esym(621, _okl4_sys_axon_set_send_segment) */
+/*lint -esym(621, _okl4_sys_axon_trigger_send) */
+/*lint -esym(621, _okl4_sys_interrupt_ack) */
+/*lint -esym(621, _okl4_sys_interrupt_attach_private) */
+/*lint -esym(621, _okl4_sys_interrupt_attach_shared) */
+/*lint -esym(621, _okl4_sys_interrupt_detach) */
+/*lint -esym(621, _okl4_sys_interrupt_dist_enable) */
+/*lint -esym(621, _okl4_sys_interrupt_eoi) */
+/*lint -esym(621, _okl4_sys_interrupt_get_highest_priority_pending) */
+/*lint -esym(621, _okl4_sys_interrupt_get_payload) */
+/*lint -esym(621, _okl4_sys_interrupt_limits) */
+/*lint -esym(621, _okl4_sys_interrupt_mask) */
+/*lint -esym(621, _okl4_sys_interrupt_raise) */
+/*lint -esym(621, _okl4_sys_interrupt_set_binary_point) */
+/*lint -esym(621, _okl4_sys_interrupt_set_config) */
+/*lint -esym(621, _okl4_sys_interrupt_set_control) */
+/*lint -esym(621, _okl4_sys_interrupt_set_priority) */
+/*lint -esym(621, _okl4_sys_interrupt_set_priority_mask) */
+/*lint -esym(621, _okl4_sys_interrupt_set_targets) */
+/*lint -esym(621, _okl4_sys_interrupt_unmask) */
+/*lint -esym(621, _okl4_sys_kdb_interact) */
+/*lint -esym(621, _okl4_sys_kdb_set_object_name) */
+/*lint -esym(621, _okl4_sys_ksp_procedure_call) */
+/*lint -esym(621, _okl4_sys_mmu_attach_segment) */
+/*lint -esym(621, _okl4_sys_mmu_detach_segment) */
+/*lint -esym(621, _okl4_sys_mmu_flush_range) */
+/*lint -esym(621, _okl4_sys_mmu_flush_range_pn) */
+/*lint -esym(621, _okl4_sys_mmu_lookup_page) */
+/*lint -esym(621, _okl4_sys_mmu_lookup_pn) */
+/*lint -esym(621, _okl4_sys_mmu_map_page) */
+/*lint -esym(621, _okl4_sys_mmu_map_pn) */
+/*lint -esym(621, _okl4_sys_mmu_unmap_page) */
+/*lint -esym(621, _okl4_sys_mmu_unmap_pn) */
+/*lint -esym(621, _okl4_sys_mmu_update_page_attrs) */
+/*lint -esym(621, _okl4_sys_mmu_update_page_perms) */
+/*lint -esym(621, _okl4_sys_mmu_update_pn_attrs) */
+/*lint -esym(621, _okl4_sys_mmu_update_pn_perms) */
+/*lint -esym(621, _okl4_sys_performance_null_syscall) */
+/*lint -esym(621, _okl4_sys_pipe_control) */
+/*lint -esym(621, _okl4_sys_pipe_recv) */
+/*lint -esym(621, _okl4_sys_pipe_send) */
+/*lint -esym(621, _okl4_sys_priority_waive) */
+/*lint -esym(621, _okl4_sys_remote_get_register) */
+/*lint -esym(621, _okl4_sys_remote_get_registers) */
+/*lint -esym(621, _okl4_sys_remote_read_memory32) */
+/*lint -esym(621, _okl4_sys_remote_set_register) */
+/*lint -esym(621, _okl4_sys_remote_set_registers) */
+/*lint -esym(621, _okl4_sys_remote_write_memory32) */
+/*lint -esym(621, _okl4_sys_schedule_metrics_status_suspended) */
+/*lint -esym(621, _okl4_sys_schedule_metrics_watch_suspended) */
+/*lint -esym(621, _okl4_sys_schedule_profile_cpu_disable) */
+/*lint -esym(621, _okl4_sys_schedule_profile_cpu_enable) */
+/*lint -esym(621, _okl4_sys_schedule_profile_cpu_get_data) */
+/*lint -esym(621, _okl4_sys_schedule_profile_vcpu_disable) */
+/*lint -esym(621, _okl4_sys_schedule_profile_vcpu_enable) */
+/*lint -esym(621, _okl4_sys_schedule_profile_vcpu_get_data) */
+/*lint -esym(621, _okl4_sys_scheduler_suspend) */
+/*lint -esym(621, _okl4_sys_timer_cancel) */
+/*lint -esym(621, _okl4_sys_timer_get_resolution) */
+/*lint -esym(621, _okl4_sys_timer_get_time) */
+/*lint -esym(621, _okl4_sys_timer_query) */
+/*lint -esym(621, _okl4_sys_timer_start) */
+/*lint -esym(621, _okl4_sys_tracebuffer_sync) */
+/*lint -esym(621, _okl4_sys_vcpu_reset) */
+/*lint -esym(621, _okl4_sys_vcpu_start) */
+/*lint -esym(621, _okl4_sys_vcpu_stop) */
+/*lint -esym(621, _okl4_sys_vcpu_switch_mode) */
+/*lint -esym(621, _okl4_sys_vcpu_sync_sev) */
+/*lint -esym(621, _okl4_sys_vcpu_sync_wfe) */
+/*lint -esym(621, _okl4_sys_vinterrupt_clear_and_raise) */
+/*lint -esym(621, _okl4_sys_vinterrupt_modify) */
+/*lint -esym(621, _okl4_sys_vinterrupt_raise) */
+#endif
+#endif /* __AUTO__USER_SYSCALLS_H__ */
+/** @} */
diff --git a/include/microvisor/kernel/types.h b/include/microvisor/kernel/types.h
new file mode 100644
index 0000000..c87285c
--- /dev/null
+++ b/include/microvisor/kernel/types.h
@@ -0,0 +1,16064 @@
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* Auto generated - do not modify */
+
+/** @addtogroup lib_microvisor
+ * @{
+ */
+/** @addtogroup lib_microvisor_types Microvisor Types
+ * @{
+ */
+#ifndef __AUTO__MICROVISOR_TYPES_H__
+#define __AUTO__MICROVISOR_TYPES_H__
+
+#if !defined(ASSEMBLY)
+
+#define OKL4_DEFAULT_PERMS OKL4_PAGE_PERMS_RWX
+#define OKL4_DEFAULT_CACHE_ATTRIBUTES OKL4_PAGE_CACHE_DEFAULT
+
+#if __SIZEOF_POINTER__ != 8
+#define __ptr64(type, name) union { type name; uint64_t _x_##name; }
+#define __ptr64_array(type, name) union { type val; uint64_t _x; } name
+#else
+#define __ptr64(type, name) type name
+#define __ptr64_array(type, name) type name
+#endif
+
+/**
+    The `okl4_bool_t` type represents a standard boolean value.  Valid values are
+    restricted to @ref OKL4_TRUE and @ref OKL4_FALSE.
+*/
+
+typedef _Bool okl4_bool_t;
+
+
+
+
+
+
+
+
+/**
+    - BITS 7..0 -   @ref OKL4_MASK_AFF0_ARM_MPIDR
+    - BITS 15..8 -   @ref OKL4_MASK_AFF1_ARM_MPIDR
+    - BITS 23..16 -   @ref OKL4_MASK_AFF2_ARM_MPIDR
+    - BIT 24 -   @ref OKL4_MASK_MT_ARM_MPIDR
+    - BIT 30 -   @ref OKL4_MASK_U_ARM_MPIDR
+    - BIT 31 -   @ref OKL4_MASK_MP_ARM_MPIDR
+    - BITS 39..32 -   @ref OKL4_MASK_AFF3_ARM_MPIDR
+*/
+
+/*lint -esym(621, okl4_arm_mpidr_t) */
+typedef uint64_t okl4_arm_mpidr_t;
+
+/*lint -esym(621, okl4_arm_mpidr_getaff0) */
+/*lint -esym(714, okl4_arm_mpidr_getaff0) */
+OKL4_FORCE_INLINE uint64_t
+okl4_arm_mpidr_getaff0(const okl4_arm_mpidr_t *x);
+
+/*lint -esym(621, okl4_arm_mpidr_setaff0) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_setaff0(okl4_arm_mpidr_t *x, uint64_t _aff0);
+
+/*lint -esym(621, okl4_arm_mpidr_getaff1) */
+/*lint -esym(714, okl4_arm_mpidr_getaff1) */
+OKL4_FORCE_INLINE uint64_t
+okl4_arm_mpidr_getaff1(const okl4_arm_mpidr_t *x);
+
+/*lint -esym(621, okl4_arm_mpidr_setaff1) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_setaff1(okl4_arm_mpidr_t *x, uint64_t _aff1);
+
+/*lint -esym(621, okl4_arm_mpidr_getaff2) */
+/*lint -esym(714, okl4_arm_mpidr_getaff2) */
+OKL4_FORCE_INLINE uint64_t
+okl4_arm_mpidr_getaff2(const okl4_arm_mpidr_t *x);
+
+/*lint -esym(621, okl4_arm_mpidr_setaff2) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_setaff2(okl4_arm_mpidr_t *x, uint64_t _aff2);
+
+/*lint -esym(621, okl4_arm_mpidr_getaff3) */
+/*lint -esym(714, okl4_arm_mpidr_getaff3) */
+OKL4_FORCE_INLINE uint64_t
+okl4_arm_mpidr_getaff3(const okl4_arm_mpidr_t *x);
+
+/*lint -esym(621, okl4_arm_mpidr_setaff3) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_setaff3(okl4_arm_mpidr_t *x, uint64_t _aff3);
+
+/*lint -esym(621, okl4_arm_mpidr_getmt) */
+/*lint -esym(714, okl4_arm_mpidr_getmt) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_mpidr_getmt(const okl4_arm_mpidr_t *x);
+
+/*lint -esym(621, okl4_arm_mpidr_setmt) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_setmt(okl4_arm_mpidr_t *x, okl4_bool_t _mt);
+
+/*lint -esym(621, okl4_arm_mpidr_getu) */
+/*lint -esym(714, okl4_arm_mpidr_getu) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_mpidr_getu(const okl4_arm_mpidr_t *x);
+
+/*lint -esym(621, okl4_arm_mpidr_setu) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_setu(okl4_arm_mpidr_t *x, okl4_bool_t _u);
+
+/*lint -esym(621, okl4_arm_mpidr_getmp) */
+/*lint -esym(714, okl4_arm_mpidr_getmp) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_mpidr_getmp(const okl4_arm_mpidr_t *x);
+
+/*lint -esym(714, okl4_arm_mpidr_init) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_init(okl4_arm_mpidr_t *x);
+
+/*lint -esym(714, okl4_arm_mpidr_cast) */
+OKL4_FORCE_INLINE okl4_arm_mpidr_t
+okl4_arm_mpidr_cast(uint64_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_ARM_MPIDR_AFF0_MASK) */
+#define OKL4_ARM_MPIDR_AFF0_MASK ((okl4_arm_mpidr_t)255U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_AFF0_ARM_MPIDR) */
+#define OKL4_MASK_AFF0_ARM_MPIDR ((okl4_arm_mpidr_t)255U)
+/*lint -esym(621, OKL4_SHIFT_AFF0_ARM_MPIDR) */
+#define OKL4_SHIFT_AFF0_ARM_MPIDR (0)
+/*lint -esym(621, OKL4_WIDTH_AFF0_ARM_MPIDR) */
+#define OKL4_WIDTH_AFF0_ARM_MPIDR (8)
+/*lint -esym(621, OKL4_ARM_MPIDR_AFF1_MASK) */
+#define OKL4_ARM_MPIDR_AFF1_MASK ((okl4_arm_mpidr_t)255U << 8) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_AFF1_ARM_MPIDR) */
+#define OKL4_MASK_AFF1_ARM_MPIDR ((okl4_arm_mpidr_t)255U << 8)
+/*lint -esym(621, OKL4_SHIFT_AFF1_ARM_MPIDR) */
+#define OKL4_SHIFT_AFF1_ARM_MPIDR (8)
+/*lint -esym(621, OKL4_WIDTH_AFF1_ARM_MPIDR) */
+#define OKL4_WIDTH_AFF1_ARM_MPIDR (8)
+/*lint -esym(621, OKL4_ARM_MPIDR_AFF2_MASK) */
+#define OKL4_ARM_MPIDR_AFF2_MASK ((okl4_arm_mpidr_t)255U << 16) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_AFF2_ARM_MPIDR) */
+#define OKL4_MASK_AFF2_ARM_MPIDR ((okl4_arm_mpidr_t)255U << 16)
+/*lint -esym(621, OKL4_SHIFT_AFF2_ARM_MPIDR) */
+#define OKL4_SHIFT_AFF2_ARM_MPIDR (16)
+/*lint -esym(621, OKL4_WIDTH_AFF2_ARM_MPIDR) */
+#define OKL4_WIDTH_AFF2_ARM_MPIDR (8)
+/*lint -esym(621, OKL4_ARM_MPIDR_MT_MASK) */
+#define OKL4_ARM_MPIDR_MT_MASK ((okl4_arm_mpidr_t)1U << 24) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_MT_ARM_MPIDR) */
+#define OKL4_MASK_MT_ARM_MPIDR ((okl4_arm_mpidr_t)1U << 24)
+/*lint -esym(621, OKL4_SHIFT_MT_ARM_MPIDR) */
+#define OKL4_SHIFT_MT_ARM_MPIDR (24)
+/*lint -esym(621, OKL4_WIDTH_MT_ARM_MPIDR) */
+#define OKL4_WIDTH_MT_ARM_MPIDR (1)
+/*lint -esym(621, OKL4_ARM_MPIDR_U_MASK) */
+#define OKL4_ARM_MPIDR_U_MASK ((okl4_arm_mpidr_t)1U << 30) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_U_ARM_MPIDR) */
+#define OKL4_MASK_U_ARM_MPIDR ((okl4_arm_mpidr_t)1U << 30)
+/*lint -esym(621, OKL4_SHIFT_U_ARM_MPIDR) */
+#define OKL4_SHIFT_U_ARM_MPIDR (30)
+/*lint -esym(621, OKL4_WIDTH_U_ARM_MPIDR) */
+#define OKL4_WIDTH_U_ARM_MPIDR (1)
+/*lint -esym(621, OKL4_ARM_MPIDR_MP_MASK) */
+#define OKL4_ARM_MPIDR_MP_MASK ((okl4_arm_mpidr_t)1U << 31) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_MP_ARM_MPIDR) */
+#define OKL4_MASK_MP_ARM_MPIDR ((okl4_arm_mpidr_t)1U << 31)
+/*lint -esym(621, OKL4_SHIFT_MP_ARM_MPIDR) */
+#define OKL4_SHIFT_MP_ARM_MPIDR (31)
+/*lint -esym(621, OKL4_WIDTH_MP_ARM_MPIDR) */
+#define OKL4_WIDTH_MP_ARM_MPIDR (1)
+/*lint -esym(621, OKL4_ARM_MPIDR_AFF3_MASK) */
+#define OKL4_ARM_MPIDR_AFF3_MASK ((okl4_arm_mpidr_t)255U << 32) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_AFF3_ARM_MPIDR) */
+#define OKL4_MASK_AFF3_ARM_MPIDR ((okl4_arm_mpidr_t)255U << 32)
+/*lint -esym(621, OKL4_SHIFT_AFF3_ARM_MPIDR) */
+#define OKL4_SHIFT_AFF3_ARM_MPIDR (32)
+/*lint -esym(621, OKL4_WIDTH_AFF3_ARM_MPIDR) */
+#define OKL4_WIDTH_AFF3_ARM_MPIDR (8)
+
+
+/*lint -sem(okl4_arm_mpidr_getaff0, 1p, @n >= 0 && @n <= 255) */
+/*lint -esym(621, okl4_arm_mpidr_getaff0) */
+/*lint -esym(714, okl4_arm_mpidr_getaff0) */
+OKL4_FORCE_INLINE uint64_t
+okl4_arm_mpidr_getaff0(const okl4_arm_mpidr_t *x)
+{
+    uint64_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t field : 8;
+        } bits;
+        okl4_arm_mpidr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint64_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_mpidr_setaff0, 2n >= 0 && 2n <= 255) */
+/*lint -esym(714, okl4_arm_mpidr_setaff0) */
+
+/*lint -esym(621, okl4_arm_mpidr_setaff0) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_setaff0(okl4_arm_mpidr_t *x, uint64_t _aff0)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t field : 8;
+        } bits;
+        okl4_arm_mpidr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint64_t)_aff0;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_mpidr_getaff1, 1p, @n >= 0 && @n <= 255) */
+/*lint -esym(621, okl4_arm_mpidr_getaff1) */
+/*lint -esym(714, okl4_arm_mpidr_getaff1) */
+OKL4_FORCE_INLINE uint64_t
+okl4_arm_mpidr_getaff1(const okl4_arm_mpidr_t *x)
+{
+    uint64_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 8;
+            uint64_t field : 8;
+        } bits;
+        okl4_arm_mpidr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint64_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_mpidr_setaff1, 2n >= 0 && 2n <= 255) */
+/*lint -esym(714, okl4_arm_mpidr_setaff1) */
+
+/*lint -esym(621, okl4_arm_mpidr_setaff1) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_setaff1(okl4_arm_mpidr_t *x, uint64_t _aff1)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 8;
+            uint64_t field : 8;
+        } bits;
+        okl4_arm_mpidr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint64_t)_aff1;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_mpidr_getaff2, 1p, @n >= 0 && @n <= 255) */
+/*lint -esym(621, okl4_arm_mpidr_getaff2) */
+/*lint -esym(714, okl4_arm_mpidr_getaff2) */
+OKL4_FORCE_INLINE uint64_t
+okl4_arm_mpidr_getaff2(const okl4_arm_mpidr_t *x)
+{
+    uint64_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 16;
+            uint64_t field : 8;
+        } bits;
+        okl4_arm_mpidr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint64_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_mpidr_setaff2, 2n >= 0 && 2n <= 255) */
+/*lint -esym(714, okl4_arm_mpidr_setaff2) */
+
+/*lint -esym(621, okl4_arm_mpidr_setaff2) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_setaff2(okl4_arm_mpidr_t *x, uint64_t _aff2)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 16;
+            uint64_t field : 8;
+        } bits;
+        okl4_arm_mpidr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint64_t)_aff2;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_mpidr_getmt, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_mpidr_getmt) */
+/*lint -esym(714, okl4_arm_mpidr_getmt) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_mpidr_getmt(const okl4_arm_mpidr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 24;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_mpidr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_mpidr_setmt, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_mpidr_setmt) */
+
+/*lint -esym(621, okl4_arm_mpidr_setmt) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_setmt(okl4_arm_mpidr_t *x, okl4_bool_t _mt)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 24;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_mpidr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_mt;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_mpidr_getu, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_mpidr_getu) */
+/*lint -esym(714, okl4_arm_mpidr_getu) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_mpidr_getu(const okl4_arm_mpidr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 30;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_mpidr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_mpidr_setu, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_mpidr_setu) */
+
+/*lint -esym(621, okl4_arm_mpidr_setu) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_setu(okl4_arm_mpidr_t *x, okl4_bool_t _u)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 30;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_mpidr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_u;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_mpidr_getmp, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_mpidr_getmp) */
+/*lint -esym(714, okl4_arm_mpidr_getmp) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_mpidr_getmp(const okl4_arm_mpidr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 31;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_mpidr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_mpidr_getaff3, 1p, @n >= 0 && @n <= 255) */
+/*lint -esym(621, okl4_arm_mpidr_getaff3) */
+/*lint -esym(714, okl4_arm_mpidr_getaff3) */
+OKL4_FORCE_INLINE uint64_t
+okl4_arm_mpidr_getaff3(const okl4_arm_mpidr_t *x)
+{
+    uint64_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 32;
+            uint64_t field : 8;
+        } bits;
+        okl4_arm_mpidr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint64_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_mpidr_setaff3, 2n >= 0 && 2n <= 255) */
+/*lint -esym(714, okl4_arm_mpidr_setaff3) */
+
+/*lint -esym(621, okl4_arm_mpidr_setaff3) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_setaff3(okl4_arm_mpidr_t *x, uint64_t _aff3)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 32;
+            uint64_t field : 8;
+        } bits;
+        okl4_arm_mpidr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint64_t)_aff3;
+    *x = _conv.raw;
+}
+/*lint -esym(714, okl4_arm_mpidr_init) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_init(okl4_arm_mpidr_t *x)
+{
+    *x = (okl4_arm_mpidr_t)2147483648U;
+}
+
+/*lint -esym(714, okl4_arm_mpidr_cast) */
+OKL4_FORCE_INLINE okl4_arm_mpidr_t
+okl4_arm_mpidr_cast(uint64_t p, okl4_bool_t force)
+{
+    okl4_arm_mpidr_t x = (okl4_arm_mpidr_t)p;
+    if (force) {
+        x &= ~(okl4_arm_mpidr_t)0x80000000U;
+        x |= (okl4_arm_mpidr_t)0x80000000U; /* x.mp */
+    }
+    return x;
+}
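+
+/*
+ * Illustrative sketch only (not part of the generated interface): compose an
+ * MPIDR value for, say, core 2 of cluster 1 using the accessors above, then
+ * read a field back.  The function and variable names below are examples.
+ */
+#if 0
+static void example_mpidr_usage(void)
+{
+    okl4_arm_mpidr_t mpidr;
+
+    okl4_arm_mpidr_init(&mpidr);            /* starts with only the MP bit set */
+    okl4_arm_mpidr_setaff0(&mpidr, 2U);     /* core index within the cluster */
+    okl4_arm_mpidr_setaff1(&mpidr, 1U);     /* cluster index */
+
+    if (okl4_arm_mpidr_getaff0(&mpidr) == 2U) {
+        /* the field reads back as written */
+    }
+}
+#endif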
+
+
+
+
+/*lint -esym(621, OKL4_AXON_NUM_RECEIVE_QUEUES) */
+#define OKL4_AXON_NUM_RECEIVE_QUEUES ((uint32_t)(4U))
+
+/*lint -esym(621, OKL4_AXON_NUM_SEND_QUEUES) */
+#define OKL4_AXON_NUM_SEND_QUEUES ((uint32_t)(4U))
+
+/*lint -esym(621, _OKL4_POISON) */
+#define _OKL4_POISON ((uint32_t)(3735928559U))
+
+/*lint -esym(621, OKL4_TRACEBUFFER_INVALID_REF) */
+#define OKL4_TRACEBUFFER_INVALID_REF ((uint32_t)(0xffffffffU))
+
+
+
+
+typedef uint32_t okl4_arm_psci_function_t;
+
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_PSCI_VERSION) */
+#define OKL4_ARM_PSCI_FUNCTION_PSCI_VERSION ((okl4_arm_psci_function_t)0x0U)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_CPU_SUSPEND) */
+#define OKL4_ARM_PSCI_FUNCTION_CPU_SUSPEND ((okl4_arm_psci_function_t)0x1U)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_CPU_OFF) */
+#define OKL4_ARM_PSCI_FUNCTION_CPU_OFF ((okl4_arm_psci_function_t)0x2U)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_CPU_ON) */
+#define OKL4_ARM_PSCI_FUNCTION_CPU_ON ((okl4_arm_psci_function_t)0x3U)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_AFFINITY_INFO) */
+#define OKL4_ARM_PSCI_FUNCTION_AFFINITY_INFO ((okl4_arm_psci_function_t)0x4U)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_MIGRATE) */
+#define OKL4_ARM_PSCI_FUNCTION_MIGRATE ((okl4_arm_psci_function_t)0x5U)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_MIGRATE_INFO_TYPE) */
+#define OKL4_ARM_PSCI_FUNCTION_MIGRATE_INFO_TYPE ((okl4_arm_psci_function_t)0x6U)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_MIGRATE_INFO_UP_CPU) */
+#define OKL4_ARM_PSCI_FUNCTION_MIGRATE_INFO_UP_CPU ((okl4_arm_psci_function_t)0x7U)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_SYSTEM_OFF) */
+#define OKL4_ARM_PSCI_FUNCTION_SYSTEM_OFF ((okl4_arm_psci_function_t)0x8U)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_SYSTEM_RESET) */
+#define OKL4_ARM_PSCI_FUNCTION_SYSTEM_RESET ((okl4_arm_psci_function_t)0x9U)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_PSCI_FEATURES) */
+#define OKL4_ARM_PSCI_FUNCTION_PSCI_FEATURES ((okl4_arm_psci_function_t)0xaU)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_CPU_FREEZE) */
+#define OKL4_ARM_PSCI_FUNCTION_CPU_FREEZE ((okl4_arm_psci_function_t)0xbU)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_CPU_DEFAULT_SUSPEND) */
+#define OKL4_ARM_PSCI_FUNCTION_CPU_DEFAULT_SUSPEND ((okl4_arm_psci_function_t)0xcU)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_NODE_HW_STATE) */
+#define OKL4_ARM_PSCI_FUNCTION_NODE_HW_STATE ((okl4_arm_psci_function_t)0xdU)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_SYSTEM_SUSPEND) */
+#define OKL4_ARM_PSCI_FUNCTION_SYSTEM_SUSPEND ((okl4_arm_psci_function_t)0xeU)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_PSCI_SET_SUSPEND_MODE) */
+#define OKL4_ARM_PSCI_FUNCTION_PSCI_SET_SUSPEND_MODE ((okl4_arm_psci_function_t)0xfU)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_PSCI_STAT_RESIDENCY) */
+#define OKL4_ARM_PSCI_FUNCTION_PSCI_STAT_RESIDENCY ((okl4_arm_psci_function_t)0x10U)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_PSCI_STAT_COUNT) */
+#define OKL4_ARM_PSCI_FUNCTION_PSCI_STAT_COUNT ((okl4_arm_psci_function_t)0x11U)
+
+/*lint -esym(714, okl4_arm_psci_function_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_psci_function_is_element_of(okl4_arm_psci_function_t var);
+
+
+/*lint -esym(714, okl4_arm_psci_function_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_psci_function_is_element_of(okl4_arm_psci_function_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((var == OKL4_ARM_PSCI_FUNCTION_PSCI_VERSION) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_CPU_SUSPEND) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_CPU_OFF) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_CPU_ON) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_AFFINITY_INFO) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_MIGRATE) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_MIGRATE_INFO_TYPE) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_MIGRATE_INFO_UP_CPU) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_SYSTEM_OFF) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_SYSTEM_RESET) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_PSCI_FEATURES) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_CPU_FREEZE) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_CPU_DEFAULT_SUSPEND) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_NODE_HW_STATE) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_SYSTEM_SUSPEND) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_PSCI_SET_SUSPEND_MODE) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_PSCI_STAT_RESIDENCY) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_PSCI_STAT_COUNT));
+}
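+
+/*
+ * Illustrative sketch only: validate an untrusted function identifier before
+ * acting on it, using the membership helper above.  The names below are
+ * examples, not part of the generated interface.
+ */
+#if 0
+static okl4_bool_t example_psci_function_valid(uint32_t raw_fn)
+{
+    okl4_arm_psci_function_t fn = (okl4_arm_psci_function_t)raw_fn;
+
+    /* Rejects anything outside the enumerated PSCI function set. */
+    return okl4_arm_psci_function_is_element_of(fn);
+}
+#endif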
+
+
+
+typedef uint32_t okl4_arm_psci_result_t;
+
+/*lint -esym(621, OKL4_ARM_PSCI_RESULT_SUCCESS) */
+#define OKL4_ARM_PSCI_RESULT_SUCCESS ((okl4_arm_psci_result_t)0x0U)
+/*lint -esym(621, OKL4_ARM_PSCI_RESULT_INVALID_ADDRESS) */
+#define OKL4_ARM_PSCI_RESULT_INVALID_ADDRESS ((okl4_arm_psci_result_t)0xfffffff7U)
+/*lint -esym(621, OKL4_ARM_PSCI_RESULT_DISABLED) */
+#define OKL4_ARM_PSCI_RESULT_DISABLED ((okl4_arm_psci_result_t)0xfffffff8U)
+/*lint -esym(621, OKL4_ARM_PSCI_RESULT_NOT_PRESENT) */
+#define OKL4_ARM_PSCI_RESULT_NOT_PRESENT ((okl4_arm_psci_result_t)0xfffffff9U)
+/*lint -esym(621, OKL4_ARM_PSCI_RESULT_INTERNAL_FAILURE) */
+#define OKL4_ARM_PSCI_RESULT_INTERNAL_FAILURE ((okl4_arm_psci_result_t)0xfffffffaU)
+/*lint -esym(621, OKL4_ARM_PSCI_RESULT_ON_PENDING) */
+#define OKL4_ARM_PSCI_RESULT_ON_PENDING ((okl4_arm_psci_result_t)0xfffffffbU)
+/*lint -esym(621, OKL4_ARM_PSCI_RESULT_ALREADY_ON) */
+#define OKL4_ARM_PSCI_RESULT_ALREADY_ON ((okl4_arm_psci_result_t)0xfffffffcU)
+/*lint -esym(621, OKL4_ARM_PSCI_RESULT_DENIED) */
+#define OKL4_ARM_PSCI_RESULT_DENIED ((okl4_arm_psci_result_t)0xfffffffdU)
+/*lint -esym(621, OKL4_ARM_PSCI_RESULT_INVALID_PARAMETERS) */
+#define OKL4_ARM_PSCI_RESULT_INVALID_PARAMETERS ((okl4_arm_psci_result_t)0xfffffffeU)
+/*lint -esym(621, OKL4_ARM_PSCI_RESULT_NOT_SUPPORTED) */
+#define OKL4_ARM_PSCI_RESULT_NOT_SUPPORTED ((okl4_arm_psci_result_t)0xffffffffU)
+
+/*lint -esym(714, okl4_arm_psci_result_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_psci_result_is_element_of(okl4_arm_psci_result_t var);
+
+
+/*lint -esym(714, okl4_arm_psci_result_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_psci_result_is_element_of(okl4_arm_psci_result_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((var == OKL4_ARM_PSCI_RESULT_SUCCESS) ||
+            (var == OKL4_ARM_PSCI_RESULT_NOT_SUPPORTED) ||
+            (var == OKL4_ARM_PSCI_RESULT_INVALID_PARAMETERS) ||
+            (var == OKL4_ARM_PSCI_RESULT_DENIED) ||
+            (var == OKL4_ARM_PSCI_RESULT_ALREADY_ON) ||
+            (var == OKL4_ARM_PSCI_RESULT_ON_PENDING) ||
+            (var == OKL4_ARM_PSCI_RESULT_INTERNAL_FAILURE) ||
+            (var == OKL4_ARM_PSCI_RESULT_NOT_PRESENT) ||
+            (var == OKL4_ARM_PSCI_RESULT_DISABLED) ||
+            (var == OKL4_ARM_PSCI_RESULT_INVALID_ADDRESS));
+}
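+
+/*
+ * Illustrative sketch only: interpret a raw PSCI return value against the
+ * result constants above.  The names below are examples.
+ */
+#if 0
+static okl4_bool_t example_psci_call_succeeded(uint32_t raw_result)
+{
+    okl4_arm_psci_result_t result = (okl4_arm_psci_result_t)raw_result;
+
+    return (okl4_bool_t)(result == OKL4_ARM_PSCI_RESULT_SUCCESS);
+}
+#endif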
+
+
+/**
+    - BITS 15..0 -   @ref OKL4_MASK_STATE_ID_ARM_PSCI_SUSPEND_STATE
+    - BIT 16 -   @ref OKL4_MASK_POWER_DOWN_ARM_PSCI_SUSPEND_STATE
+    - BITS 25..24 -   @ref OKL4_MASK_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE
+*/
+
+/*lint -esym(621, okl4_arm_psci_suspend_state_t) */
+typedef uint32_t okl4_arm_psci_suspend_state_t;
+
+/*lint -esym(621, okl4_arm_psci_suspend_state_getstateid) */
+/*lint -esym(714, okl4_arm_psci_suspend_state_getstateid) */
+OKL4_FORCE_INLINE uint32_t
+okl4_arm_psci_suspend_state_getstateid(const okl4_arm_psci_suspend_state_t *x);
+
+/*lint -esym(621, okl4_arm_psci_suspend_state_setstateid) */
+OKL4_FORCE_INLINE void
+okl4_arm_psci_suspend_state_setstateid(okl4_arm_psci_suspend_state_t *x, uint32_t _state_id);
+
+/*lint -esym(621, okl4_arm_psci_suspend_state_getpowerdown) */
+/*lint -esym(714, okl4_arm_psci_suspend_state_getpowerdown) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_psci_suspend_state_getpowerdown(const okl4_arm_psci_suspend_state_t *x);
+
+/*lint -esym(621, okl4_arm_psci_suspend_state_setpowerdown) */
+OKL4_FORCE_INLINE void
+okl4_arm_psci_suspend_state_setpowerdown(okl4_arm_psci_suspend_state_t *x, okl4_bool_t _power_down);
+
+/*lint -esym(621, okl4_arm_psci_suspend_state_getpowerlevel) */
+/*lint -esym(714, okl4_arm_psci_suspend_state_getpowerlevel) */
+OKL4_FORCE_INLINE uint32_t
+okl4_arm_psci_suspend_state_getpowerlevel(const okl4_arm_psci_suspend_state_t *x);
+
+/*lint -esym(621, okl4_arm_psci_suspend_state_setpowerlevel) */
+OKL4_FORCE_INLINE void
+okl4_arm_psci_suspend_state_setpowerlevel(okl4_arm_psci_suspend_state_t *x, uint32_t _power_level);
+
+/*lint -esym(714, okl4_arm_psci_suspend_state_init) */
+OKL4_FORCE_INLINE void
+okl4_arm_psci_suspend_state_init(okl4_arm_psci_suspend_state_t *x);
+
+/*lint -esym(714, okl4_arm_psci_suspend_state_cast) */
+OKL4_FORCE_INLINE okl4_arm_psci_suspend_state_t
+okl4_arm_psci_suspend_state_cast(uint32_t p, okl4_bool_t force);
+
+
+
+/*lint -esym(621, OKL4_ARM_PSCI_POWER_LEVEL_CPU) */
+#define OKL4_ARM_PSCI_POWER_LEVEL_CPU ((okl4_arm_psci_suspend_state_t)(0U))
+
+/*lint -esym(621, OKL4_ARM_PSCI_SUSPEND_STATE_STATE_ID_MASK) */
+#define OKL4_ARM_PSCI_SUSPEND_STATE_STATE_ID_MASK ((okl4_arm_psci_suspend_state_t)65535U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_STATE_ID_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_MASK_STATE_ID_ARM_PSCI_SUSPEND_STATE ((okl4_arm_psci_suspend_state_t)65535U)
+/*lint -esym(621, OKL4_SHIFT_STATE_ID_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_SHIFT_STATE_ID_ARM_PSCI_SUSPEND_STATE (0)
+/*lint -esym(621, OKL4_WIDTH_STATE_ID_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_WIDTH_STATE_ID_ARM_PSCI_SUSPEND_STATE (16)
+/*lint -esym(621, OKL4_ARM_PSCI_SUSPEND_STATE_POWER_DOWN_MASK) */
+#define OKL4_ARM_PSCI_SUSPEND_STATE_POWER_DOWN_MASK ((okl4_arm_psci_suspend_state_t)1U << 16) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_POWER_DOWN_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_MASK_POWER_DOWN_ARM_PSCI_SUSPEND_STATE ((okl4_arm_psci_suspend_state_t)1U << 16)
+/*lint -esym(621, OKL4_SHIFT_POWER_DOWN_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_SHIFT_POWER_DOWN_ARM_PSCI_SUSPEND_STATE (16)
+/*lint -esym(621, OKL4_WIDTH_POWER_DOWN_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_WIDTH_POWER_DOWN_ARM_PSCI_SUSPEND_STATE (1)
+/*lint -esym(621, OKL4_ARM_PSCI_SUSPEND_STATE_POWER_LEVEL_MASK) */
+#define OKL4_ARM_PSCI_SUSPEND_STATE_POWER_LEVEL_MASK ((okl4_arm_psci_suspend_state_t)3U << 24) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_MASK_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE ((okl4_arm_psci_suspend_state_t)3U << 24)
+/*lint -esym(621, OKL4_SHIFT_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_SHIFT_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE (24)
+/*lint -esym(621, OKL4_WIDTH_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_WIDTH_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE (2)
+
+
+/*lint -sem(okl4_arm_psci_suspend_state_getstateid, 1p, @n >= 0 && @n <= 65535) */
+/*lint -esym(621, okl4_arm_psci_suspend_state_getstateid) */
+/*lint -esym(714, okl4_arm_psci_suspend_state_getstateid) */
+OKL4_FORCE_INLINE uint32_t
+okl4_arm_psci_suspend_state_getstateid(const okl4_arm_psci_suspend_state_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 16;
+        } bits;
+        okl4_arm_psci_suspend_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_psci_suspend_state_setstateid, 2n >= 0 && 2n <= 65535) */
+/*lint -esym(714, okl4_arm_psci_suspend_state_setstateid) */
+
+/*lint -esym(621, okl4_arm_psci_suspend_state_setstateid) */
+OKL4_FORCE_INLINE void
+okl4_arm_psci_suspend_state_setstateid(okl4_arm_psci_suspend_state_t *x, uint32_t _state_id)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 16;
+        } bits;
+        okl4_arm_psci_suspend_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_state_id;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_psci_suspend_state_getpowerdown, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_psci_suspend_state_getpowerdown) */
+/*lint -esym(714, okl4_arm_psci_suspend_state_getpowerdown) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_psci_suspend_state_getpowerdown(const okl4_arm_psci_suspend_state_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 16;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_psci_suspend_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_psci_suspend_state_setpowerdown, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_psci_suspend_state_setpowerdown) */
+
+/*lint -esym(621, okl4_arm_psci_suspend_state_setpowerdown) */
+OKL4_FORCE_INLINE void
+okl4_arm_psci_suspend_state_setpowerdown(okl4_arm_psci_suspend_state_t *x, okl4_bool_t _power_down)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 16;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_psci_suspend_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_power_down;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_psci_suspend_state_getpowerlevel, 1p, @n >= 0 && @n <= 3) */
+/*lint -esym(621, okl4_arm_psci_suspend_state_getpowerlevel) */
+/*lint -esym(714, okl4_arm_psci_suspend_state_getpowerlevel) */
+OKL4_FORCE_INLINE uint32_t
+okl4_arm_psci_suspend_state_getpowerlevel(const okl4_arm_psci_suspend_state_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 24;
+            uint32_t field : 2;
+        } bits;
+        okl4_arm_psci_suspend_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_psci_suspend_state_setpowerlevel, 2n >= 0 && 2n <= 3) */
+/*lint -esym(714, okl4_arm_psci_suspend_state_setpowerlevel) */
+
+/*lint -esym(621, okl4_arm_psci_suspend_state_setpowerlevel) */
+OKL4_FORCE_INLINE void
+okl4_arm_psci_suspend_state_setpowerlevel(okl4_arm_psci_suspend_state_t *x, uint32_t _power_level)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 24;
+            uint32_t field : 2;
+        } bits;
+        okl4_arm_psci_suspend_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_power_level;
+    *x = _conv.raw;
+}
+/*lint -esym(714, okl4_arm_psci_suspend_state_init) */
+OKL4_FORCE_INLINE void
+okl4_arm_psci_suspend_state_init(okl4_arm_psci_suspend_state_t *x)
+{
+    *x = (okl4_arm_psci_suspend_state_t)0U;
+}
+
+/*lint -esym(714, okl4_arm_psci_suspend_state_cast) */
+OKL4_FORCE_INLINE okl4_arm_psci_suspend_state_t
+okl4_arm_psci_suspend_state_cast(uint32_t p, okl4_bool_t force)
+{
+    okl4_arm_psci_suspend_state_t x = (okl4_arm_psci_suspend_state_t)p;
+    (void)force;
+    return x;
+}
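+
+/*
+ * Illustrative sketch only: build the state argument for a power-down
+ * CPU_SUSPEND request with the accessors above.  The state id and power
+ * level values here are placeholders, not platform requirements.
+ */
+#if 0
+static okl4_arm_psci_suspend_state_t example_suspend_state(void)
+{
+    okl4_arm_psci_suspend_state_t state;
+
+    okl4_arm_psci_suspend_state_init(&state);
+    okl4_arm_psci_suspend_state_setstateid(&state, 0x0U);
+    okl4_arm_psci_suspend_state_setpowerdown(&state, (okl4_bool_t)1);
+    okl4_arm_psci_suspend_state_setpowerlevel(&state,
+            (uint32_t)OKL4_ARM_PSCI_POWER_LEVEL_CPU);
+
+    return state;
+}
+#endif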
+
+
+
+/**
+    - BIT 0 -   @ref OKL4_MASK_MMU_ENABLE_ARM_SCTLR
+    - BIT 1 -   @ref OKL4_MASK_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR
+    - BIT 2 -   @ref OKL4_MASK_DATA_CACHE_ENABLE_ARM_SCTLR
+    - BIT 3 -   @ref OKL4_MASK_STACK_ALIGN_ARM_SCTLR
+    - BIT 4 -   @ref OKL4_MASK_STACK_ALIGN_EL0_ARM_SCTLR
+    - BIT 5 -   @ref OKL4_MASK_CP15_BARRIER_ENABLE_ARM_SCTLR
+    - BIT 6 -   @ref OKL4_MASK_OKL_HCR_EL2_DC_ARM_SCTLR
+    - BIT 7 -   @ref OKL4_MASK_IT_DISABLE_ARM_SCTLR
+    - BIT 8 -   @ref OKL4_MASK_SETEND_DISABLE_ARM_SCTLR
+    - BIT 9 -   @ref OKL4_MASK_USER_MASK_ACCESS_ARM_SCTLR
+    - BIT 11 -   @ref OKL4_MASK_RESERVED11_ARM_SCTLR
+    - BIT 12 -   @ref OKL4_MASK_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR
+    - BIT 13 -   @ref OKL4_MASK_VECTORS_BIT_ARM_SCTLR
+    - BIT 14 -   @ref OKL4_MASK_DCACHE_ZERO_ARM_SCTLR
+    - BIT 15 -   @ref OKL4_MASK_USER_CACHE_TYPE_ARM_SCTLR
+    - BIT 16 -   @ref OKL4_MASK_NO_TRAP_WFI_ARM_SCTLR
+    - BIT 18 -   @ref OKL4_MASK_NO_TRAP_WFE_ARM_SCTLR
+    - BIT 19 -   @ref OKL4_MASK_WRITE_EXEC_NEVER_ARM_SCTLR
+    - BIT 20 -   @ref OKL4_MASK_USER_WRITE_EXEC_NEVER_ARM_SCTLR
+    - BIT 22 -   @ref OKL4_MASK_RESERVED22_ARM_SCTLR
+    - BIT 23 -   @ref OKL4_MASK_RESERVED23_ARM_SCTLR
+    - BIT 24 -   @ref OKL4_MASK_EL0_ENDIANNESS_ARM_SCTLR
+    - BIT 25 -   @ref OKL4_MASK_EXCEPTION_ENDIANNESS_ARM_SCTLR
+    - BIT 28 -   @ref OKL4_MASK_TEX_REMAP_ENABLE_ARM_SCTLR
+    - BIT 29 -   @ref OKL4_MASK_ACCESS_FLAG_ENABLE_ARM_SCTLR
+    - BIT 30 -   @ref OKL4_MASK_THUMB_EXCEPTION_ENABLE_ARM_SCTLR
+*/
+
+/*lint -esym(621, okl4_arm_sctlr_t) */
+typedef uint32_t okl4_arm_sctlr_t;
+
+/*lint -esym(621, okl4_arm_sctlr_getmmuenable) */
+/*lint -esym(714, okl4_arm_sctlr_getmmuenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getmmuenable(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setmmuenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setmmuenable(okl4_arm_sctlr_t *x, okl4_bool_t _mmu_enable);
+
+/*lint -esym(621, okl4_arm_sctlr_getalignmentcheckenable) */
+/*lint -esym(714, okl4_arm_sctlr_getalignmentcheckenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getalignmentcheckenable(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setalignmentcheckenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setalignmentcheckenable(okl4_arm_sctlr_t *x, okl4_bool_t _alignment_check_enable);
+
+/*lint -esym(621, okl4_arm_sctlr_getdatacacheenable) */
+/*lint -esym(714, okl4_arm_sctlr_getdatacacheenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getdatacacheenable(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setdatacacheenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setdatacacheenable(okl4_arm_sctlr_t *x, okl4_bool_t _data_cache_enable);
+
+/*lint -esym(621, okl4_arm_sctlr_getinstructioncacheenable) */
+/*lint -esym(714, okl4_arm_sctlr_getinstructioncacheenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getinstructioncacheenable(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setinstructioncacheenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setinstructioncacheenable(okl4_arm_sctlr_t *x, okl4_bool_t _instruction_cache_enable);
+
+/*lint -esym(621, okl4_arm_sctlr_getcp15barrierenable) */
+/*lint -esym(714, okl4_arm_sctlr_getcp15barrierenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getcp15barrierenable(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setcp15barrierenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setcp15barrierenable(okl4_arm_sctlr_t *x, okl4_bool_t _cp15_barrier_enable);
+
+/*lint -esym(621, okl4_arm_sctlr_getitdisable) */
+/*lint -esym(714, okl4_arm_sctlr_getitdisable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getitdisable(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setitdisable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setitdisable(okl4_arm_sctlr_t *x, okl4_bool_t _it_disable);
+
+/*lint -esym(621, okl4_arm_sctlr_getsetenddisable) */
+/*lint -esym(714, okl4_arm_sctlr_getsetenddisable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getsetenddisable(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setsetenddisable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setsetenddisable(okl4_arm_sctlr_t *x, okl4_bool_t _setend_disable);
+
+/*lint -esym(621, okl4_arm_sctlr_getreserved11) */
+/*lint -esym(714, okl4_arm_sctlr_getreserved11) */
+OKL4_FORCE_INLINE uint32_t
+okl4_arm_sctlr_getreserved11(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_getnotrapwfi) */
+/*lint -esym(714, okl4_arm_sctlr_getnotrapwfi) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getnotrapwfi(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setnotrapwfi) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setnotrapwfi(okl4_arm_sctlr_t *x, okl4_bool_t _no_trap_wfi);
+
+/*lint -esym(621, okl4_arm_sctlr_getnotrapwfe) */
+/*lint -esym(714, okl4_arm_sctlr_getnotrapwfe) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getnotrapwfe(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setnotrapwfe) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setnotrapwfe(okl4_arm_sctlr_t *x, okl4_bool_t _no_trap_wfe);
+
+/*lint -esym(621, okl4_arm_sctlr_getwriteexecnever) */
+/*lint -esym(714, okl4_arm_sctlr_getwriteexecnever) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getwriteexecnever(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setwriteexecnever) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setwriteexecnever(okl4_arm_sctlr_t *x, okl4_bool_t _write_exec_never);
+
+/*lint -esym(621, okl4_arm_sctlr_getreserved22) */
+/*lint -esym(714, okl4_arm_sctlr_getreserved22) */
+OKL4_FORCE_INLINE uint32_t
+okl4_arm_sctlr_getreserved22(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_getreserved23) */
+/*lint -esym(714, okl4_arm_sctlr_getreserved23) */
+OKL4_FORCE_INLINE uint32_t
+okl4_arm_sctlr_getreserved23(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_getel0endianness) */
+/*lint -esym(714, okl4_arm_sctlr_getel0endianness) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getel0endianness(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setel0endianness) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setel0endianness(okl4_arm_sctlr_t *x, okl4_bool_t _el0_endianness);
+
+/*lint -esym(621, okl4_arm_sctlr_getexceptionendianness) */
+/*lint -esym(714, okl4_arm_sctlr_getexceptionendianness) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getexceptionendianness(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setexceptionendianness) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setexceptionendianness(okl4_arm_sctlr_t *x, okl4_bool_t _exception_endianness);
+
+/*lint -esym(621, okl4_arm_sctlr_getvectorsbit) */
+/*lint -esym(714, okl4_arm_sctlr_getvectorsbit) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getvectorsbit(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setvectorsbit) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setvectorsbit(okl4_arm_sctlr_t *x, okl4_bool_t _vectors_bit);
+
+/*lint -esym(621, okl4_arm_sctlr_getuserwriteexecnever) */
+/*lint -esym(714, okl4_arm_sctlr_getuserwriteexecnever) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getuserwriteexecnever(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setuserwriteexecnever) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setuserwriteexecnever(okl4_arm_sctlr_t *x, okl4_bool_t _user_write_exec_never);
+
+/*lint -esym(621, okl4_arm_sctlr_gettexremapenable) */
+/*lint -esym(714, okl4_arm_sctlr_gettexremapenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_gettexremapenable(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_settexremapenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_settexremapenable(okl4_arm_sctlr_t *x, okl4_bool_t _tex_remap_enable);
+
+/*lint -esym(621, okl4_arm_sctlr_getaccessflagenable) */
+/*lint -esym(714, okl4_arm_sctlr_getaccessflagenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getaccessflagenable(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setaccessflagenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setaccessflagenable(okl4_arm_sctlr_t *x, okl4_bool_t _access_flag_enable);
+
+/*lint -esym(621, okl4_arm_sctlr_getthumbexceptionenable) */
+/*lint -esym(714, okl4_arm_sctlr_getthumbexceptionenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getthumbexceptionenable(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setthumbexceptionenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setthumbexceptionenable(okl4_arm_sctlr_t *x, okl4_bool_t _thumb_exception_enable);
+
+/*lint -esym(621, okl4_arm_sctlr_getstackalign) */
+/*lint -esym(714, okl4_arm_sctlr_getstackalign) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getstackalign(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setstackalign) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setstackalign(okl4_arm_sctlr_t *x, okl4_bool_t _stack_align);
+
+/*lint -esym(621, okl4_arm_sctlr_getstackalignel0) */
+/*lint -esym(714, okl4_arm_sctlr_getstackalignel0) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getstackalignel0(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setstackalignel0) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setstackalignel0(okl4_arm_sctlr_t *x, okl4_bool_t _stack_align_el0);
+
+/*lint -esym(621, okl4_arm_sctlr_getusermaskaccess) */
+/*lint -esym(714, okl4_arm_sctlr_getusermaskaccess) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getusermaskaccess(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setusermaskaccess) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setusermaskaccess(okl4_arm_sctlr_t *x, okl4_bool_t _user_mask_access);
+
+/*lint -esym(621, okl4_arm_sctlr_getdcachezero) */
+/*lint -esym(714, okl4_arm_sctlr_getdcachezero) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getdcachezero(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setdcachezero) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setdcachezero(okl4_arm_sctlr_t *x, okl4_bool_t _dcache_zero);
+
+/*lint -esym(621, okl4_arm_sctlr_getusercachetype) */
+/*lint -esym(714, okl4_arm_sctlr_getusercachetype) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getusercachetype(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setusercachetype) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setusercachetype(okl4_arm_sctlr_t *x, okl4_bool_t _user_cache_type);
+
+/*lint -esym(621, okl4_arm_sctlr_getoklhcrel2dc) */
+/*lint -esym(714, okl4_arm_sctlr_getoklhcrel2dc) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getoklhcrel2dc(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setoklhcrel2dc) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setoklhcrel2dc(okl4_arm_sctlr_t *x, okl4_bool_t _okl_hcr_el2_dc);
+
+/*lint -esym(714, okl4_arm_sctlr_init) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_init(okl4_arm_sctlr_t *x);
+
+/*lint -esym(714, okl4_arm_sctlr_cast) */
+OKL4_FORCE_INLINE okl4_arm_sctlr_t
+okl4_arm_sctlr_cast(uint32_t p, okl4_bool_t force);
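+
+/*
+ * Illustrative sketch only: one way to build an SCTLR value with the
+ * accessors declared above (their definitions follow below), enabling the
+ * MMU and both caches.  The function name is an example.
+ */
+#if 0
+static okl4_arm_sctlr_t example_sctlr_mmu_and_caches(void)
+{
+    okl4_arm_sctlr_t sctlr;
+
+    okl4_arm_sctlr_init(&sctlr);
+    okl4_arm_sctlr_setmmuenable(&sctlr, (okl4_bool_t)1);
+    okl4_arm_sctlr_setdatacacheenable(&sctlr, (okl4_bool_t)1);
+    okl4_arm_sctlr_setinstructioncacheenable(&sctlr, (okl4_bool_t)1);
+
+    return sctlr;
+}
+#endif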
+
+
+
+
+/*lint -esym(621, OKL4_ARM_SCTLR_MMU_ENABLE_MASK) */
+#define OKL4_ARM_SCTLR_MMU_ENABLE_MASK ((okl4_arm_sctlr_t)1U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_MMU_ENABLE_ARM_SCTLR) */
+#define OKL4_MASK_MMU_ENABLE_ARM_SCTLR ((okl4_arm_sctlr_t)1U)
+/*lint -esym(621, OKL4_SHIFT_MMU_ENABLE_ARM_SCTLR) */
+#define OKL4_SHIFT_MMU_ENABLE_ARM_SCTLR (0)
+/*lint -esym(621, OKL4_WIDTH_MMU_ENABLE_ARM_SCTLR) */
+#define OKL4_WIDTH_MMU_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_ALIGNMENT_CHECK_ENABLE_MASK) */
+#define OKL4_ARM_SCTLR_ALIGNMENT_CHECK_ENABLE_MASK ((okl4_arm_sctlr_t)1U << 1) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR) */
+#define OKL4_MASK_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 1)
+/*lint -esym(621, OKL4_SHIFT_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR) */
+#define OKL4_SHIFT_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_WIDTH_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR) */
+#define OKL4_WIDTH_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_DATA_CACHE_ENABLE_MASK) */
+#define OKL4_ARM_SCTLR_DATA_CACHE_ENABLE_MASK ((okl4_arm_sctlr_t)1U << 2) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_DATA_CACHE_ENABLE_ARM_SCTLR) */
+#define OKL4_MASK_DATA_CACHE_ENABLE_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 2)
+/*lint -esym(621, OKL4_SHIFT_DATA_CACHE_ENABLE_ARM_SCTLR) */
+#define OKL4_SHIFT_DATA_CACHE_ENABLE_ARM_SCTLR (2)
+/*lint -esym(621, OKL4_WIDTH_DATA_CACHE_ENABLE_ARM_SCTLR) */
+#define OKL4_WIDTH_DATA_CACHE_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_STACK_ALIGN_MASK) */
+#define OKL4_ARM_SCTLR_STACK_ALIGN_MASK ((okl4_arm_sctlr_t)1U << 3) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_STACK_ALIGN_ARM_SCTLR) */
+#define OKL4_MASK_STACK_ALIGN_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 3)
+/*lint -esym(621, OKL4_SHIFT_STACK_ALIGN_ARM_SCTLR) */
+#define OKL4_SHIFT_STACK_ALIGN_ARM_SCTLR (3)
+/*lint -esym(621, OKL4_WIDTH_STACK_ALIGN_ARM_SCTLR) */
+#define OKL4_WIDTH_STACK_ALIGN_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_STACK_ALIGN_EL0_MASK) */
+#define OKL4_ARM_SCTLR_STACK_ALIGN_EL0_MASK ((okl4_arm_sctlr_t)1U << 4) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_STACK_ALIGN_EL0_ARM_SCTLR) */
+#define OKL4_MASK_STACK_ALIGN_EL0_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 4)
+/*lint -esym(621, OKL4_SHIFT_STACK_ALIGN_EL0_ARM_SCTLR) */
+#define OKL4_SHIFT_STACK_ALIGN_EL0_ARM_SCTLR (4)
+/*lint -esym(621, OKL4_WIDTH_STACK_ALIGN_EL0_ARM_SCTLR) */
+#define OKL4_WIDTH_STACK_ALIGN_EL0_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_CP15_BARRIER_ENABLE_MASK) */
+#define OKL4_ARM_SCTLR_CP15_BARRIER_ENABLE_MASK ((okl4_arm_sctlr_t)1U << 5) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_CP15_BARRIER_ENABLE_ARM_SCTLR) */
+#define OKL4_MASK_CP15_BARRIER_ENABLE_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 5)
+/*lint -esym(621, OKL4_SHIFT_CP15_BARRIER_ENABLE_ARM_SCTLR) */
+#define OKL4_SHIFT_CP15_BARRIER_ENABLE_ARM_SCTLR (5)
+/*lint -esym(621, OKL4_WIDTH_CP15_BARRIER_ENABLE_ARM_SCTLR) */
+#define OKL4_WIDTH_CP15_BARRIER_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_OKL_HCR_EL2_DC_MASK) */
+#define OKL4_ARM_SCTLR_OKL_HCR_EL2_DC_MASK ((okl4_arm_sctlr_t)1U << 6) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_OKL_HCR_EL2_DC_ARM_SCTLR) */
+#define OKL4_MASK_OKL_HCR_EL2_DC_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 6)
+/*lint -esym(621, OKL4_SHIFT_OKL_HCR_EL2_DC_ARM_SCTLR) */
+#define OKL4_SHIFT_OKL_HCR_EL2_DC_ARM_SCTLR (6)
+/*lint -esym(621, OKL4_WIDTH_OKL_HCR_EL2_DC_ARM_SCTLR) */
+#define OKL4_WIDTH_OKL_HCR_EL2_DC_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_IT_DISABLE_MASK) */
+#define OKL4_ARM_SCTLR_IT_DISABLE_MASK ((okl4_arm_sctlr_t)1U << 7) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_IT_DISABLE_ARM_SCTLR) */
+#define OKL4_MASK_IT_DISABLE_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 7)
+/*lint -esym(621, OKL4_SHIFT_IT_DISABLE_ARM_SCTLR) */
+#define OKL4_SHIFT_IT_DISABLE_ARM_SCTLR (7)
+/*lint -esym(621, OKL4_WIDTH_IT_DISABLE_ARM_SCTLR) */
+#define OKL4_WIDTH_IT_DISABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_SETEND_DISABLE_MASK) */
+#define OKL4_ARM_SCTLR_SETEND_DISABLE_MASK ((okl4_arm_sctlr_t)1U << 8) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_SETEND_DISABLE_ARM_SCTLR) */
+#define OKL4_MASK_SETEND_DISABLE_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 8)
+/*lint -esym(621, OKL4_SHIFT_SETEND_DISABLE_ARM_SCTLR) */
+#define OKL4_SHIFT_SETEND_DISABLE_ARM_SCTLR (8)
+/*lint -esym(621, OKL4_WIDTH_SETEND_DISABLE_ARM_SCTLR) */
+#define OKL4_WIDTH_SETEND_DISABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_USER_MASK_ACCESS_MASK) */
+#define OKL4_ARM_SCTLR_USER_MASK_ACCESS_MASK ((okl4_arm_sctlr_t)1U << 9) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_USER_MASK_ACCESS_ARM_SCTLR) */
+#define OKL4_MASK_USER_MASK_ACCESS_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 9)
+/*lint -esym(621, OKL4_SHIFT_USER_MASK_ACCESS_ARM_SCTLR) */
+#define OKL4_SHIFT_USER_MASK_ACCESS_ARM_SCTLR (9)
+/*lint -esym(621, OKL4_WIDTH_USER_MASK_ACCESS_ARM_SCTLR) */
+#define OKL4_WIDTH_USER_MASK_ACCESS_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_RESERVED11_MASK) */
+#define OKL4_ARM_SCTLR_RESERVED11_MASK ((okl4_arm_sctlr_t)1U << 11) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_RESERVED11_ARM_SCTLR) */
+#define OKL4_MASK_RESERVED11_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 11)
+/*lint -esym(621, OKL4_SHIFT_RESERVED11_ARM_SCTLR) */
+#define OKL4_SHIFT_RESERVED11_ARM_SCTLR (11)
+/*lint -esym(621, OKL4_WIDTH_RESERVED11_ARM_SCTLR) */
+#define OKL4_WIDTH_RESERVED11_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_INSTRUCTION_CACHE_ENABLE_MASK) */
+#define OKL4_ARM_SCTLR_INSTRUCTION_CACHE_ENABLE_MASK ((okl4_arm_sctlr_t)1U << 12) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR) */
+#define OKL4_MASK_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 12)
+/*lint -esym(621, OKL4_SHIFT_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR) */
+#define OKL4_SHIFT_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR (12)
+/*lint -esym(621, OKL4_WIDTH_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR) */
+#define OKL4_WIDTH_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_VECTORS_BIT_MASK) */
+#define OKL4_ARM_SCTLR_VECTORS_BIT_MASK ((okl4_arm_sctlr_t)1U << 13) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_VECTORS_BIT_ARM_SCTLR) */
+#define OKL4_MASK_VECTORS_BIT_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 13)
+/*lint -esym(621, OKL4_SHIFT_VECTORS_BIT_ARM_SCTLR) */
+#define OKL4_SHIFT_VECTORS_BIT_ARM_SCTLR (13)
+/*lint -esym(621, OKL4_WIDTH_VECTORS_BIT_ARM_SCTLR) */
+#define OKL4_WIDTH_VECTORS_BIT_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_DCACHE_ZERO_MASK) */
+#define OKL4_ARM_SCTLR_DCACHE_ZERO_MASK ((okl4_arm_sctlr_t)1U << 14) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_DCACHE_ZERO_ARM_SCTLR) */
+#define OKL4_MASK_DCACHE_ZERO_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 14)
+/*lint -esym(621, OKL4_SHIFT_DCACHE_ZERO_ARM_SCTLR) */
+#define OKL4_SHIFT_DCACHE_ZERO_ARM_SCTLR (14)
+/*lint -esym(621, OKL4_WIDTH_DCACHE_ZERO_ARM_SCTLR) */
+#define OKL4_WIDTH_DCACHE_ZERO_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_USER_CACHE_TYPE_MASK) */
+#define OKL4_ARM_SCTLR_USER_CACHE_TYPE_MASK ((okl4_arm_sctlr_t)1U << 15) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_USER_CACHE_TYPE_ARM_SCTLR) */
+#define OKL4_MASK_USER_CACHE_TYPE_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 15)
+/*lint -esym(621, OKL4_SHIFT_USER_CACHE_TYPE_ARM_SCTLR) */
+#define OKL4_SHIFT_USER_CACHE_TYPE_ARM_SCTLR (15)
+/*lint -esym(621, OKL4_WIDTH_USER_CACHE_TYPE_ARM_SCTLR) */
+#define OKL4_WIDTH_USER_CACHE_TYPE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_NO_TRAP_WFI_MASK) */
+#define OKL4_ARM_SCTLR_NO_TRAP_WFI_MASK ((okl4_arm_sctlr_t)1U << 16) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_NO_TRAP_WFI_ARM_SCTLR) */
+#define OKL4_MASK_NO_TRAP_WFI_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 16)
+/*lint -esym(621, OKL4_SHIFT_NO_TRAP_WFI_ARM_SCTLR) */
+#define OKL4_SHIFT_NO_TRAP_WFI_ARM_SCTLR (16)
+/*lint -esym(621, OKL4_WIDTH_NO_TRAP_WFI_ARM_SCTLR) */
+#define OKL4_WIDTH_NO_TRAP_WFI_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_NO_TRAP_WFE_MASK) */
+#define OKL4_ARM_SCTLR_NO_TRAP_WFE_MASK ((okl4_arm_sctlr_t)1U << 18) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_NO_TRAP_WFE_ARM_SCTLR) */
+#define OKL4_MASK_NO_TRAP_WFE_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 18)
+/*lint -esym(621, OKL4_SHIFT_NO_TRAP_WFE_ARM_SCTLR) */
+#define OKL4_SHIFT_NO_TRAP_WFE_ARM_SCTLR (18)
+/*lint -esym(621, OKL4_WIDTH_NO_TRAP_WFE_ARM_SCTLR) */
+#define OKL4_WIDTH_NO_TRAP_WFE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_WRITE_EXEC_NEVER_MASK) */
+#define OKL4_ARM_SCTLR_WRITE_EXEC_NEVER_MASK ((okl4_arm_sctlr_t)1U << 19) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_WRITE_EXEC_NEVER_ARM_SCTLR) */
+#define OKL4_MASK_WRITE_EXEC_NEVER_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 19)
+/*lint -esym(621, OKL4_SHIFT_WRITE_EXEC_NEVER_ARM_SCTLR) */
+#define OKL4_SHIFT_WRITE_EXEC_NEVER_ARM_SCTLR (19)
+/*lint -esym(621, OKL4_WIDTH_WRITE_EXEC_NEVER_ARM_SCTLR) */
+#define OKL4_WIDTH_WRITE_EXEC_NEVER_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_USER_WRITE_EXEC_NEVER_MASK) */
+#define OKL4_ARM_SCTLR_USER_WRITE_EXEC_NEVER_MASK ((okl4_arm_sctlr_t)1U << 20) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_USER_WRITE_EXEC_NEVER_ARM_SCTLR) */
+#define OKL4_MASK_USER_WRITE_EXEC_NEVER_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 20)
+/*lint -esym(621, OKL4_SHIFT_USER_WRITE_EXEC_NEVER_ARM_SCTLR) */
+#define OKL4_SHIFT_USER_WRITE_EXEC_NEVER_ARM_SCTLR (20)
+/*lint -esym(621, OKL4_WIDTH_USER_WRITE_EXEC_NEVER_ARM_SCTLR) */
+#define OKL4_WIDTH_USER_WRITE_EXEC_NEVER_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_RESERVED22_MASK) */
+#define OKL4_ARM_SCTLR_RESERVED22_MASK ((okl4_arm_sctlr_t)1U << 22) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_RESERVED22_ARM_SCTLR) */
+#define OKL4_MASK_RESERVED22_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 22)
+/*lint -esym(621, OKL4_SHIFT_RESERVED22_ARM_SCTLR) */
+#define OKL4_SHIFT_RESERVED22_ARM_SCTLR (22)
+/*lint -esym(621, OKL4_WIDTH_RESERVED22_ARM_SCTLR) */
+#define OKL4_WIDTH_RESERVED22_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_RESERVED23_MASK) */
+#define OKL4_ARM_SCTLR_RESERVED23_MASK ((okl4_arm_sctlr_t)1U << 23) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_RESERVED23_ARM_SCTLR) */
+#define OKL4_MASK_RESERVED23_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 23)
+/*lint -esym(621, OKL4_SHIFT_RESERVED23_ARM_SCTLR) */
+#define OKL4_SHIFT_RESERVED23_ARM_SCTLR (23)
+/*lint -esym(621, OKL4_WIDTH_RESERVED23_ARM_SCTLR) */
+#define OKL4_WIDTH_RESERVED23_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_EL0_ENDIANNESS_MASK) */
+#define OKL4_ARM_SCTLR_EL0_ENDIANNESS_MASK ((okl4_arm_sctlr_t)1U << 24) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_EL0_ENDIANNESS_ARM_SCTLR) */
+#define OKL4_MASK_EL0_ENDIANNESS_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 24)
+/*lint -esym(621, OKL4_SHIFT_EL0_ENDIANNESS_ARM_SCTLR) */
+#define OKL4_SHIFT_EL0_ENDIANNESS_ARM_SCTLR (24)
+/*lint -esym(621, OKL4_WIDTH_EL0_ENDIANNESS_ARM_SCTLR) */
+#define OKL4_WIDTH_EL0_ENDIANNESS_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_EXCEPTION_ENDIANNESS_MASK) */
+#define OKL4_ARM_SCTLR_EXCEPTION_ENDIANNESS_MASK ((okl4_arm_sctlr_t)1U << 25) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_EXCEPTION_ENDIANNESS_ARM_SCTLR) */
+#define OKL4_MASK_EXCEPTION_ENDIANNESS_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 25)
+/*lint -esym(621, OKL4_SHIFT_EXCEPTION_ENDIANNESS_ARM_SCTLR) */
+#define OKL4_SHIFT_EXCEPTION_ENDIANNESS_ARM_SCTLR (25)
+/*lint -esym(621, OKL4_WIDTH_EXCEPTION_ENDIANNESS_ARM_SCTLR) */
+#define OKL4_WIDTH_EXCEPTION_ENDIANNESS_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_TEX_REMAP_ENABLE_MASK) */
+#define OKL4_ARM_SCTLR_TEX_REMAP_ENABLE_MASK ((okl4_arm_sctlr_t)1U << 28) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_TEX_REMAP_ENABLE_ARM_SCTLR) */
+#define OKL4_MASK_TEX_REMAP_ENABLE_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 28)
+/*lint -esym(621, OKL4_SHIFT_TEX_REMAP_ENABLE_ARM_SCTLR) */
+#define OKL4_SHIFT_TEX_REMAP_ENABLE_ARM_SCTLR (28)
+/*lint -esym(621, OKL4_WIDTH_TEX_REMAP_ENABLE_ARM_SCTLR) */
+#define OKL4_WIDTH_TEX_REMAP_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_ACCESS_FLAG_ENABLE_MASK) */
+#define OKL4_ARM_SCTLR_ACCESS_FLAG_ENABLE_MASK ((okl4_arm_sctlr_t)1U << 29) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_ACCESS_FLAG_ENABLE_ARM_SCTLR) */
+#define OKL4_MASK_ACCESS_FLAG_ENABLE_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 29)
+/*lint -esym(621, OKL4_SHIFT_ACCESS_FLAG_ENABLE_ARM_SCTLR) */
+#define OKL4_SHIFT_ACCESS_FLAG_ENABLE_ARM_SCTLR (29)
+/*lint -esym(621, OKL4_WIDTH_ACCESS_FLAG_ENABLE_ARM_SCTLR) */
+#define OKL4_WIDTH_ACCESS_FLAG_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_THUMB_EXCEPTION_ENABLE_MASK) */
+#define OKL4_ARM_SCTLR_THUMB_EXCEPTION_ENABLE_MASK ((okl4_arm_sctlr_t)1U << 30) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_THUMB_EXCEPTION_ENABLE_ARM_SCTLR) */
+#define OKL4_MASK_THUMB_EXCEPTION_ENABLE_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 30)
+/*lint -esym(621, OKL4_SHIFT_THUMB_EXCEPTION_ENABLE_ARM_SCTLR) */
+#define OKL4_SHIFT_THUMB_EXCEPTION_ENABLE_ARM_SCTLR (30)
+/*lint -esym(621, OKL4_WIDTH_THUMB_EXCEPTION_ENABLE_ARM_SCTLR) */
+#define OKL4_WIDTH_THUMB_EXCEPTION_ENABLE_ARM_SCTLR (1)
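+
+/*
+ * Illustrative sketch only: each field also exposes MASK/SHIFT/WIDTH macros,
+ * so a field can be extracted from a raw register value without the typed
+ * accessors.  This is equivalent to okl4_arm_sctlr_getmmuenable() below.
+ */
+#if 0
+static okl4_bool_t example_sctlr_mmu_enabled(uint32_t raw_sctlr)
+{
+    uint32_t field = (raw_sctlr & (uint32_t)OKL4_MASK_MMU_ENABLE_ARM_SCTLR)
+            >> OKL4_SHIFT_MMU_ENABLE_ARM_SCTLR;
+
+    return (okl4_bool_t)(field != 0U);
+}
+#endif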
+
+
+/*lint -sem(okl4_arm_sctlr_getmmuenable, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getmmuenable) */
+/*lint -esym(714, okl4_arm_sctlr_getmmuenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getmmuenable(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setmmuenable, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setmmuenable) */
+
+/*lint -esym(621, okl4_arm_sctlr_setmmuenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setmmuenable(okl4_arm_sctlr_t *x, okl4_bool_t _mmu_enable)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_mmu_enable;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getalignmentcheckenable, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getalignmentcheckenable) */
+/*lint -esym(714, okl4_arm_sctlr_getalignmentcheckenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getalignmentcheckenable(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 1;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setalignmentcheckenable, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setalignmentcheckenable) */
+
+/*lint -esym(621, okl4_arm_sctlr_setalignmentcheckenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setalignmentcheckenable(okl4_arm_sctlr_t *x, okl4_bool_t _alignment_check_enable)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 1;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_alignment_check_enable;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getdatacacheenable, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getdatacacheenable) */
+/*lint -esym(714, okl4_arm_sctlr_getdatacacheenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getdatacacheenable(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 2;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setdatacacheenable, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setdatacacheenable) */
+
+/*lint -esym(621, okl4_arm_sctlr_setdatacacheenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setdatacacheenable(okl4_arm_sctlr_t *x, okl4_bool_t _data_cache_enable)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 2;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_data_cache_enable;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getstackalign, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getstackalign) */
+/*lint -esym(714, okl4_arm_sctlr_getstackalign) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getstackalign(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 3;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setstackalign, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setstackalign) */
+
+/*lint -esym(621, okl4_arm_sctlr_setstackalign) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setstackalign(okl4_arm_sctlr_t *x, okl4_bool_t _stack_align)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 3;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_stack_align;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getstackalignel0, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getstackalignel0) */
+/*lint -esym(714, okl4_arm_sctlr_getstackalignel0) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getstackalignel0(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 4;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setstackalignel0, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setstackalignel0) */
+
+/*lint -esym(621, okl4_arm_sctlr_setstackalignel0) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setstackalignel0(okl4_arm_sctlr_t *x, okl4_bool_t _stack_align_el0)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 4;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_stack_align_el0;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getcp15barrierenable, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getcp15barrierenable) */
+/*lint -esym(714, okl4_arm_sctlr_getcp15barrierenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getcp15barrierenable(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 5;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setcp15barrierenable, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setcp15barrierenable) */
+
+/*lint -esym(621, okl4_arm_sctlr_setcp15barrierenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setcp15barrierenable(okl4_arm_sctlr_t *x, okl4_bool_t _cp15_barrier_enable)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 5;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_cp15_barrier_enable;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getoklhcrel2dc, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getoklhcrel2dc) */
+/*lint -esym(714, okl4_arm_sctlr_getoklhcrel2dc) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getoklhcrel2dc(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 6;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setoklhcrel2dc, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setoklhcrel2dc) */
+
+/*lint -esym(621, okl4_arm_sctlr_setoklhcrel2dc) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setoklhcrel2dc(okl4_arm_sctlr_t *x, okl4_bool_t _okl_hcr_el2_dc)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 6;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_okl_hcr_el2_dc;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getitdisable, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getitdisable) */
+/*lint -esym(714, okl4_arm_sctlr_getitdisable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getitdisable(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 7;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setitdisable, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setitdisable) */
+
+/*lint -esym(621, okl4_arm_sctlr_setitdisable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setitdisable(okl4_arm_sctlr_t *x, okl4_bool_t _it_disable)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 7;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_it_disable;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getsetenddisable, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getsetenddisable) */
+/*lint -esym(714, okl4_arm_sctlr_getsetenddisable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getsetenddisable(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 8;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setsetenddisable, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setsetenddisable) */
+
+/*lint -esym(621, okl4_arm_sctlr_setsetenddisable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setsetenddisable(okl4_arm_sctlr_t *x, okl4_bool_t _setend_disable)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 8;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_setend_disable;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getusermaskaccess, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getusermaskaccess) */
+/*lint -esym(714, okl4_arm_sctlr_getusermaskaccess) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getusermaskaccess(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 9;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setusermaskaccess, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setusermaskaccess) */
+
+/*lint -esym(621, okl4_arm_sctlr_setusermaskaccess) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setusermaskaccess(okl4_arm_sctlr_t *x, okl4_bool_t _user_mask_access)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 9;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_user_mask_access;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getreserved11, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getreserved11) */
+/*lint -esym(714, okl4_arm_sctlr_getreserved11) */
+OKL4_FORCE_INLINE uint32_t
+okl4_arm_sctlr_getreserved11(const okl4_arm_sctlr_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 11;
+            uint32_t field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_getinstructioncacheenable, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getinstructioncacheenable) */
+/*lint -esym(714, okl4_arm_sctlr_getinstructioncacheenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getinstructioncacheenable(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 12;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setinstructioncacheenable, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setinstructioncacheenable) */
+
+/*lint -esym(621, okl4_arm_sctlr_setinstructioncacheenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setinstructioncacheenable(okl4_arm_sctlr_t *x, okl4_bool_t _instruction_cache_enable)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 12;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_instruction_cache_enable;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getvectorsbit, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getvectorsbit) */
+/*lint -esym(714, okl4_arm_sctlr_getvectorsbit) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getvectorsbit(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 13;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setvectorsbit, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setvectorsbit) */
+
+/*lint -esym(621, okl4_arm_sctlr_setvectorsbit) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setvectorsbit(okl4_arm_sctlr_t *x, okl4_bool_t _vectors_bit)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 13;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_vectors_bit;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getdcachezero, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getdcachezero) */
+/*lint -esym(714, okl4_arm_sctlr_getdcachezero) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getdcachezero(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 14;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setdcachezero, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setdcachezero) */
+
+/*lint -esym(621, okl4_arm_sctlr_setdcachezero) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setdcachezero(okl4_arm_sctlr_t *x, okl4_bool_t _dcache_zero)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 14;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_dcache_zero;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getusercachetype, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getusercachetype) */
+/*lint -esym(714, okl4_arm_sctlr_getusercachetype) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getusercachetype(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 15;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setusercachetype, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setusercachetype) */
+
+/*lint -esym(621, okl4_arm_sctlr_setusercachetype) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setusercachetype(okl4_arm_sctlr_t *x, okl4_bool_t _user_cache_type)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 15;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_user_cache_type;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getnotrapwfi, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getnotrapwfi) */
+/*lint -esym(714, okl4_arm_sctlr_getnotrapwfi) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getnotrapwfi(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 16;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setnotrapwfi, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setnotrapwfi) */
+
+/*lint -esym(621, okl4_arm_sctlr_setnotrapwfi) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setnotrapwfi(okl4_arm_sctlr_t *x, okl4_bool_t _no_trap_wfi)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 16;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_no_trap_wfi;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getnotrapwfe, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getnotrapwfe) */
+/*lint -esym(714, okl4_arm_sctlr_getnotrapwfe) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getnotrapwfe(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 18;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setnotrapwfe, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setnotrapwfe) */
+
+/*lint -esym(621, okl4_arm_sctlr_setnotrapwfe) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setnotrapwfe(okl4_arm_sctlr_t *x, okl4_bool_t _no_trap_wfe)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 18;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_no_trap_wfe;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getwriteexecnever, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getwriteexecnever) */
+/*lint -esym(714, okl4_arm_sctlr_getwriteexecnever) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getwriteexecnever(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 19;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setwriteexecnever, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setwriteexecnever) */
+
+/*lint -esym(621, okl4_arm_sctlr_setwriteexecnever) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setwriteexecnever(okl4_arm_sctlr_t *x, okl4_bool_t _write_exec_never)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 19;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_write_exec_never;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getuserwriteexecnever, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getuserwriteexecnever) */
+/*lint -esym(714, okl4_arm_sctlr_getuserwriteexecnever) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getuserwriteexecnever(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 20;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setuserwriteexecnever, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setuserwriteexecnever) */
+
+/*lint -esym(621, okl4_arm_sctlr_setuserwriteexecnever) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setuserwriteexecnever(okl4_arm_sctlr_t *x, okl4_bool_t _user_write_exec_never)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 20;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_user_write_exec_never;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getreserved22, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getreserved22) */
+/*lint -esym(714, okl4_arm_sctlr_getreserved22) */
+OKL4_FORCE_INLINE uint32_t
+okl4_arm_sctlr_getreserved22(const okl4_arm_sctlr_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 22;
+            uint32_t field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_getreserved23, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getreserved23) */
+/*lint -esym(714, okl4_arm_sctlr_getreserved23) */
+OKL4_FORCE_INLINE uint32_t
+okl4_arm_sctlr_getreserved23(const okl4_arm_sctlr_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 23;
+            uint32_t field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_getel0endianness, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getel0endianness) */
+/*lint -esym(714, okl4_arm_sctlr_getel0endianness) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getel0endianness(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 24;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setel0endianness, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setel0endianness) */
+
+/*lint -esym(621, okl4_arm_sctlr_setel0endianness) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setel0endianness(okl4_arm_sctlr_t *x, okl4_bool_t _el0_endianness)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 24;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_el0_endianness;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getexceptionendianness, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getexceptionendianness) */
+/*lint -esym(714, okl4_arm_sctlr_getexceptionendianness) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getexceptionendianness(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 25;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setexceptionendianness, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setexceptionendianness) */
+
+/*lint -esym(621, okl4_arm_sctlr_setexceptionendianness) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setexceptionendianness(okl4_arm_sctlr_t *x, okl4_bool_t _exception_endianness)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 25;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_exception_endianness;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_gettexremapenable, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_gettexremapenable) */
+/*lint -esym(714, okl4_arm_sctlr_gettexremapenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_gettexremapenable(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 28;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_settexremapenable, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_settexremapenable) */
+
+/*lint -esym(621, okl4_arm_sctlr_settexremapenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_settexremapenable(okl4_arm_sctlr_t *x, okl4_bool_t _tex_remap_enable)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 28;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_tex_remap_enable;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getaccessflagenable, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getaccessflagenable) */
+/*lint -esym(714, okl4_arm_sctlr_getaccessflagenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getaccessflagenable(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 29;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setaccessflagenable, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setaccessflagenable) */
+
+/*lint -esym(621, okl4_arm_sctlr_setaccessflagenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setaccessflagenable(okl4_arm_sctlr_t *x, okl4_bool_t _access_flag_enable)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 29;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_access_flag_enable;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getthumbexceptionenable, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getthumbexceptionenable) */
+/*lint -esym(714, okl4_arm_sctlr_getthumbexceptionenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getthumbexceptionenable(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 30;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setthumbexceptionenable, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setthumbexceptionenable) */
+
+/*lint -esym(621, okl4_arm_sctlr_setthumbexceptionenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setthumbexceptionenable(okl4_arm_sctlr_t *x, okl4_bool_t _thumb_exception_enable)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 30;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_thumb_exception_enable;
+    *x = _conv.raw;
+}
+/*lint -esym(714, okl4_arm_sctlr_init) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_init(okl4_arm_sctlr_t *x)
+{
+    *x = (okl4_arm_sctlr_t)12912928U;
+}
+
+/*lint -esym(714, okl4_arm_sctlr_cast) */
+OKL4_FORCE_INLINE okl4_arm_sctlr_t
+okl4_arm_sctlr_cast(uint32_t p, okl4_bool_t force)
+{
+    okl4_arm_sctlr_t x = (okl4_arm_sctlr_t)p;
+    if (force) {
+        x &= ~(okl4_arm_sctlr_t)0x800U;
+        x |= (okl4_arm_sctlr_t)0x800U; /* x.reserved11 */
+        x &= ~(okl4_arm_sctlr_t)0x400000U;
+        x |= (okl4_arm_sctlr_t)0x400000U; /* x.reserved22 */
+        x &= ~(okl4_arm_sctlr_t)0x800000U;
+        x |= (okl4_arm_sctlr_t)0x800000U; /* x.reserved23 */
+    }
+    return x;
+}
+
+
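+/*
+ * Illustrative usage sketch (not part of the generated interface): all of
+ * the SCTLR accessors above follow the same union-based bitfield pattern,
+ * so a typical caller initialises a value and then toggles individual
+ * fields through the setters, for example:
+ *
+ *     okl4_arm_sctlr_t sctlr;
+ *     okl4_arm_sctlr_init(&sctlr);  // start from the generated default value
+ *     okl4_arm_sctlr_setinstructioncacheenable(&sctlr, (okl4_bool_t)1);
+ *     if (okl4_arm_sctlr_getexceptionendianness(&sctlr)) {
+ *         okl4_arm_sctlr_setexceptionendianness(&sctlr, (okl4_bool_t)0);
+ *     }
+ */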
+
+
+typedef uint32_t okl4_arm_smccc_arch_function_t;
+
+/*lint -esym(621, OKL4_ARM_SMCCC_ARCH_FUNCTION_SMCCC_VERSION) */
+#define OKL4_ARM_SMCCC_ARCH_FUNCTION_SMCCC_VERSION ((okl4_arm_smccc_arch_function_t)0x0U)
+/*lint -esym(621, OKL4_ARM_SMCCC_ARCH_FUNCTION_ARCH_FEATURES) */
+#define OKL4_ARM_SMCCC_ARCH_FUNCTION_ARCH_FEATURES ((okl4_arm_smccc_arch_function_t)0x1U)
+/*lint -esym(621, OKL4_ARM_SMCCC_ARCH_FUNCTION_ARCH_WORKAROUND_1) */
+#define OKL4_ARM_SMCCC_ARCH_FUNCTION_ARCH_WORKAROUND_1 ((okl4_arm_smccc_arch_function_t)0x8000U)
+
+/*lint -esym(714, okl4_arm_smccc_arch_function_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_smccc_arch_function_is_element_of(okl4_arm_smccc_arch_function_t var);
+
+
+/*lint -esym(714, okl4_arm_smccc_arch_function_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_smccc_arch_function_is_element_of(okl4_arm_smccc_arch_function_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((var == OKL4_ARM_SMCCC_ARCH_FUNCTION_SMCCC_VERSION) ||
+            (var == OKL4_ARM_SMCCC_ARCH_FUNCTION_ARCH_FEATURES) ||
+            (var == OKL4_ARM_SMCCC_ARCH_FUNCTION_ARCH_WORKAROUND_1));
+}
+
+
+
+typedef uint32_t okl4_arm_smccc_result_t;
+
+/*lint -esym(621, OKL4_ARM_SMCCC_RESULT_SUCCESS) */
+#define OKL4_ARM_SMCCC_RESULT_SUCCESS ((okl4_arm_smccc_result_t)0x0U)
+/*lint -esym(621, OKL4_ARM_SMCCC_RESULT_NOT_SUPPORTED) */
+#define OKL4_ARM_SMCCC_RESULT_NOT_SUPPORTED ((okl4_arm_smccc_result_t)0xffffffffU)
+
+/*lint -esym(714, okl4_arm_smccc_result_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_smccc_result_is_element_of(okl4_arm_smccc_result_t var);
+
+
+/*lint -esym(714, okl4_arm_smccc_result_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_smccc_result_is_element_of(okl4_arm_smccc_result_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((var == OKL4_ARM_SMCCC_RESULT_SUCCESS) ||
+            (var == OKL4_ARM_SMCCC_RESULT_NOT_SUPPORTED));
+}
+
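+/*
+ * Illustrative usage sketch (assumption, not generated code): the
+ * _is_element_of() helpers above let a caller validate a raw value before
+ * treating it as an enumeration member; raw_x0 below is a hypothetical
+ * register value obtained from an SMC call:
+ *
+ *     okl4_arm_smccc_result_t res = (okl4_arm_smccc_result_t)raw_x0;
+ *     if (!okl4_arm_smccc_result_is_element_of(res)) {
+ *         res = OKL4_ARM_SMCCC_RESULT_NOT_SUPPORTED;
+ *     }
+ */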
+
+/**
+    The `okl4_register_t` type represents an unsigned, machine-native
+    register-sized integer value.
+*/
+
+typedef uint64_t okl4_register_t;
+
+
+
+
+
+typedef okl4_register_t okl4_atomic_raw_register_t;
+
+
+
+
+
+
+
+
+
+typedef uint16_t okl4_atomic_raw_uint16_t;
+
+
+
+
+
+typedef uint32_t okl4_atomic_raw_uint32_t;
+
+
+
+
+
+typedef uint64_t okl4_atomic_raw_uint64_t;
+
+
+
+
+
+
+
+
+
+typedef uint8_t okl4_atomic_raw_uint8_t;
+
+
+
+
+/**
+    The okl4_atomic_register_t type implements a machine-word-sized value
+    that can be operated on using atomic operations.  This can be used
+    to implement thread-safe synchronisation primitives.
+*/
+
+struct okl4_atomic_register {
+    volatile okl4_atomic_raw_register_t value;
+};
+
+
+
+
+
+
+/**
+    The okl4_atomic_register_t type implements a machine-word-sized value
+    that can be operated on using atomic operations.  This can be used
+    to implement thread-safe synchronisation primitives.
+*/
+
+typedef struct okl4_atomic_register okl4_atomic_register_t;
+
+
+
+
+/**
+    The okl4_atomic_uint16_t type implements a 16-bit value
+    that can be operated on using atomic operations.  This can be used
+    to implement thread-safe synchronisation primitives.
+*/
+
+struct okl4_atomic_uint16 {
+    volatile okl4_atomic_raw_uint16_t value;
+};
+
+
+
+
+
+
+/**
+    The okl4_atomic_uint16_t type implements a 16-bit value
+    that can be operated on using atomic operations.  This can be used
+    to implement thread-safe synchronisation primitives.
+*/
+
+typedef struct okl4_atomic_uint16 okl4_atomic_uint16_t;
+
+
+
+
+/**
+    The okl4_atomic_uint32_t type implements a 32-bit value
+    that can be operated on using atomic operations.  This can be used
+    to implement thread-safe synchronisation primitives.
+*/
+
+struct okl4_atomic_uint32 {
+    volatile okl4_atomic_raw_uint32_t value;
+};
+
+
+
+
+
+
+/**
+    The okl4_atomic_uint32_t type implements a 32-bit value
+    that can be operated on using atomic operations.  This can be used
+    to implement thread-safe synchronisation primitives.
+*/
+
+typedef struct okl4_atomic_uint32 okl4_atomic_uint32_t;
+
+
+
+
+/**
+    The okl4_atomic_uint64_t type implements a 64-bit value
+    that can be operated on using atomic operations.  This can be used
+    to implement thread-safe synchronisation primitives.
+*/
+
+struct okl4_atomic_uint64 {
+    volatile okl4_atomic_raw_uint64_t value;
+};
+
+
+
+
+
+
+/**
+    The okl4_atomic_uint64_t type implements a 64-bit value
+    that can be operated on using atomic operations.  This can be used
+    to implement thread-safe synchronisation primitives.
+*/
+
+typedef struct okl4_atomic_uint64 okl4_atomic_uint64_t;
+
+
+
+
+/**
+    The okl4_atomic_uint8_t type implements an 8-bit value
+    that can be operated on using atomic operations.  This can be used
+    to implement thread-safe synchronisation primitives.
+*/
+
+struct okl4_atomic_uint8 {
+    volatile okl4_atomic_raw_uint8_t value;
+};
+
+
+
+
+
+
+/**
+    The okl4_atomic_uint8_t type implements an 8-bit value
+    that can be operated on using atomic operations.  This can be used
+    to implement thread-safe synchronisation primitives.
+*/
+
+typedef struct okl4_atomic_uint8 okl4_atomic_uint8_t;
+
+
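+/*
+ * Illustrative usage sketch (assumption: the atomic operation helpers that
+ * act on these wrappers are declared elsewhere; GCC/Clang builtins stand in
+ * for them here): a simple test-and-set lock flag built on
+ * okl4_atomic_uint32_t might look like:
+ *
+ *     struct okl4_atomic_uint32 flag = { 0U };
+ *     while (__atomic_exchange_n(&flag.value, 1U, __ATOMIC_ACQUIRE) != 0U) {
+ *         // spin until the previous holder releases the flag
+ *     }
+ *     __atomic_store_n(&flag.value, 0U, __ATOMIC_RELEASE);  // release the flag
+ */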
+
+
+/**
+    The `okl4_count_t` type represents a natural number of items or
+    iterations. This type is unsigned and cannot represent error values; use
+    `okl4_scount_t` if an error representation is required.
+*/
+
+typedef uint32_t okl4_count_t;
+
+/*lint -esym(621, OKL4_DEFAULT_PAGEBITS) */
+#define OKL4_DEFAULT_PAGEBITS ((okl4_count_t)(12U))
+
+/** The maximum limit for the segment index returned by mmu_lookup_segment. */
+/*lint -esym(621, OKL4_KMMU_LOOKUP_PAGE_SEGMENT_MASK) */
+#define OKL4_KMMU_LOOKUP_PAGE_SEGMENT_MASK ((okl4_count_t)(1023U))
+
+/** The maximum limit for segment attachments to a KMMU. */
+/*lint -esym(621, OKL4_KMMU_MAX_SEGMENTS) */
+#define OKL4_KMMU_MAX_SEGMENTS ((okl4_count_t)(256U))
+
+/*lint -esym(621, OKL4_PROFILE_NO_PCPUS) */
+#define OKL4_PROFILE_NO_PCPUS ((okl4_count_t)(0xffffffffU))
+
+
+
+/**
+    The `okl4_kcap_t` type represents a kernel object capability identifier
+    (otherwise known as *designator* or *cap*) that addresses a kernel
+    capability. A capability encodes rights to perform particular operations on
+    a kernel object.
+*/
+
+typedef okl4_count_t okl4_kcap_t;
+
+/*lint -esym(621, OKL4_KCAP_INVALID) */
+#define OKL4_KCAP_INVALID ((okl4_kcap_t)(0xffffffffU))
+
+
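+/*
+ * Illustrative usage sketch (not generated code): a capability slot that is
+ * not populated can be represented by OKL4_KCAP_INVALID, so callers test for
+ * that sentinel before use; lookup_cap() below is a hypothetical helper:
+ *
+ *     okl4_kcap_t cap = lookup_cap();
+ *     if (cap != OKL4_KCAP_INVALID) {
+ *         // the capability is present and may be invoked
+ *     }
+ */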
+
+/**
+    The `okl4_interrupt_number_t` type is an index into the interrupt ID
+    space. For platforms with a single simple interrupt controller, this is
+    the physical interrupt number. When there are multiple interrupt
+    controllers, or a large and sparse interrupt ID space, the mapping from
+    this type to the physical interrupt is defined by the KSP.
+*/
+
+typedef okl4_count_t okl4_interrupt_number_t;
+
+/*lint -esym(621, OKL4_INTERRUPT_INVALID_IRQ) */
+#define OKL4_INTERRUPT_INVALID_IRQ ((okl4_interrupt_number_t)(1023U))
+
+/*lint -esym(621, OKL4_INVALID_VIRQ) */
+#define OKL4_INVALID_VIRQ ((okl4_interrupt_number_t)(1023U))
+
+
+
+
+typedef okl4_interrupt_number_t okl4_irq_t;
+
+
+
+
+/**
+
+*/
+
+struct okl4_axon_data {
+    okl4_kcap_t kcap;
+    okl4_kcap_t segment;
+    okl4_irq_t virq;
+};
+
+
+
+
+/**
+    The `okl4_psize_t` type represents an unsigned integer value which is large
+    enough to represent the size of any physical memory object.
+*/
+
+typedef okl4_register_t okl4_psize_t;
+
+
+
+
+/**
+    The `okl4_lsize_t` type represents an unsigned integer value which is large
+    enough to represent the size of any guest logical memory object.
+*/
+
+typedef okl4_psize_t okl4_lsize_t;
+
+/*lint -esym(621, OKL4_DEFAULT_PAGESIZE) */
+#define OKL4_DEFAULT_PAGESIZE ((okl4_lsize_t)(4096U))
+
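+/* Note: OKL4_DEFAULT_PAGESIZE (4096) is 1 << OKL4_DEFAULT_PAGEBITS (12). */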
+
+
+/**
+    The `okl4_laddr_t` type represents an unsigned integer value which is large
+    enough to contain a guest logical address; that is, an address in the
+    input address space of the guest's virtual MMU. This may be larger than
+    the machine's pointer type.
+*/
+
+typedef okl4_lsize_t okl4_laddr_t;
+
+/*lint -esym(621, OKL4_USER_AREA_END) */
+#define OKL4_USER_AREA_END ((okl4_laddr_t)(17592186044416U))
+
+
+
+/**
+    - BIT 0 -   @ref OKL4_MASK_PENDING_AXON_DATA_INFO
+    - BIT 1 -   @ref OKL4_MASK_FAILURE_AXON_DATA_INFO
+    - BIT 2 -   @ref OKL4_MASK_USR_AXON_DATA_INFO
+    - BITS 63..3 -   @ref OKL4_MASK_LADDR_AXON_DATA_INFO
+*/
+
+/*lint -esym(621, okl4_axon_data_info_t) */
+typedef okl4_laddr_t okl4_axon_data_info_t;
+
+/*lint -esym(621, okl4_axon_data_info_getpending) */
+/*lint -esym(714, okl4_axon_data_info_getpending) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_axon_data_info_getpending(const okl4_axon_data_info_t *x);
+
+/*lint -esym(621, okl4_axon_data_info_setpending) */
+OKL4_FORCE_INLINE void
+okl4_axon_data_info_setpending(okl4_axon_data_info_t *x, okl4_bool_t _pending);
+
+/*lint -esym(621, okl4_axon_data_info_getfailure) */
+/*lint -esym(714, okl4_axon_data_info_getfailure) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_axon_data_info_getfailure(const okl4_axon_data_info_t *x);
+
+/*lint -esym(621, okl4_axon_data_info_setfailure) */
+OKL4_FORCE_INLINE void
+okl4_axon_data_info_setfailure(okl4_axon_data_info_t *x, okl4_bool_t _failure);
+
+/*lint -esym(621, okl4_axon_data_info_getusr) */
+/*lint -esym(714, okl4_axon_data_info_getusr) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_axon_data_info_getusr(const okl4_axon_data_info_t *x);
+
+/*lint -esym(621, okl4_axon_data_info_setusr) */
+OKL4_FORCE_INLINE void
+okl4_axon_data_info_setusr(okl4_axon_data_info_t *x, okl4_bool_t _usr);
+
+/*lint -esym(621, okl4_axon_data_info_getladdr) */
+/*lint -esym(714, okl4_axon_data_info_getladdr) */
+OKL4_FORCE_INLINE okl4_laddr_t
+okl4_axon_data_info_getladdr(const okl4_axon_data_info_t *x);
+
+/*lint -esym(621, okl4_axon_data_info_setladdr) */
+OKL4_FORCE_INLINE void
+okl4_axon_data_info_setladdr(okl4_axon_data_info_t *x, okl4_laddr_t _laddr);
+
+/*lint -esym(714, okl4_axon_data_info_init) */
+OKL4_FORCE_INLINE void
+okl4_axon_data_info_init(okl4_axon_data_info_t *x);
+
+/*lint -esym(714, okl4_axon_data_info_cast) */
+OKL4_FORCE_INLINE okl4_axon_data_info_t
+okl4_axon_data_info_cast(uint64_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_AXON_DATA_INFO_PENDING_MASK) */
+#define OKL4_AXON_DATA_INFO_PENDING_MASK ((okl4_axon_data_info_t)1U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_PENDING_AXON_DATA_INFO) */
+#define OKL4_MASK_PENDING_AXON_DATA_INFO ((okl4_axon_data_info_t)1U)
+/*lint -esym(621, OKL4_SHIFT_PENDING_AXON_DATA_INFO) */
+#define OKL4_SHIFT_PENDING_AXON_DATA_INFO (0)
+/*lint -esym(621, OKL4_WIDTH_PENDING_AXON_DATA_INFO) */
+#define OKL4_WIDTH_PENDING_AXON_DATA_INFO (1)
+/*lint -esym(621, OKL4_AXON_DATA_INFO_FAILURE_MASK) */
+#define OKL4_AXON_DATA_INFO_FAILURE_MASK ((okl4_axon_data_info_t)1U << 1) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_FAILURE_AXON_DATA_INFO) */
+#define OKL4_MASK_FAILURE_AXON_DATA_INFO ((okl4_axon_data_info_t)1U << 1)
+/*lint -esym(621, OKL4_SHIFT_FAILURE_AXON_DATA_INFO) */
+#define OKL4_SHIFT_FAILURE_AXON_DATA_INFO (1)
+/*lint -esym(621, OKL4_WIDTH_FAILURE_AXON_DATA_INFO) */
+#define OKL4_WIDTH_FAILURE_AXON_DATA_INFO (1)
+/*lint -esym(621, OKL4_AXON_DATA_INFO_USR_MASK) */
+#define OKL4_AXON_DATA_INFO_USR_MASK ((okl4_axon_data_info_t)1U << 2) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_USR_AXON_DATA_INFO) */
+#define OKL4_MASK_USR_AXON_DATA_INFO ((okl4_axon_data_info_t)1U << 2)
+/*lint -esym(621, OKL4_SHIFT_USR_AXON_DATA_INFO) */
+#define OKL4_SHIFT_USR_AXON_DATA_INFO (2)
+/*lint -esym(621, OKL4_WIDTH_USR_AXON_DATA_INFO) */
+#define OKL4_WIDTH_USR_AXON_DATA_INFO (1)
+/*lint -esym(621, OKL4_AXON_DATA_INFO_LADDR_MASK) */
+#define OKL4_AXON_DATA_INFO_LADDR_MASK ((okl4_axon_data_info_t)2305843009213693951U << 3) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_LADDR_AXON_DATA_INFO) */
+#define OKL4_MASK_LADDR_AXON_DATA_INFO ((okl4_axon_data_info_t)2305843009213693951U << 3)
+/*lint -esym(621, OKL4_SHIFT_LADDR_AXON_DATA_INFO) */
+#define OKL4_SHIFT_LADDR_AXON_DATA_INFO (3)
+/*lint -esym(621, OKL4_PRESHIFT_LADDR_AXON_DATA_INFO) */
+#define OKL4_PRESHIFT_LADDR_AXON_DATA_INFO (3)
+/*lint -esym(621, OKL4_WIDTH_LADDR_AXON_DATA_INFO) */
+#define OKL4_WIDTH_LADDR_AXON_DATA_INFO (61)
+
+
+/*lint -sem(okl4_axon_data_info_getpending, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_axon_data_info_getpending) */
+/*lint -esym(714, okl4_axon_data_info_getpending) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_axon_data_info_getpending(const okl4_axon_data_info_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            _Bool field : 1;
+        } bits;
+        okl4_axon_data_info_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_axon_data_info_setpending, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_axon_data_info_setpending) */
+
+/*lint -esym(621, okl4_axon_data_info_setpending) */
+OKL4_FORCE_INLINE void
+okl4_axon_data_info_setpending(okl4_axon_data_info_t *x, okl4_bool_t _pending)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            _Bool field : 1;
+        } bits;
+        okl4_axon_data_info_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_pending;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_axon_data_info_getfailure, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_axon_data_info_getfailure) */
+/*lint -esym(714, okl4_axon_data_info_getfailure) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_axon_data_info_getfailure(const okl4_axon_data_info_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 1;
+            _Bool field : 1;
+        } bits;
+        okl4_axon_data_info_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_axon_data_info_setfailure, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_axon_data_info_setfailure) */
+
+/*lint -esym(621, okl4_axon_data_info_setfailure) */
+OKL4_FORCE_INLINE void
+okl4_axon_data_info_setfailure(okl4_axon_data_info_t *x, okl4_bool_t _failure)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 1;
+            _Bool field : 1;
+        } bits;
+        okl4_axon_data_info_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_failure;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_axon_data_info_getusr, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_axon_data_info_getusr) */
+/*lint -esym(714, okl4_axon_data_info_getusr) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_axon_data_info_getusr(const okl4_axon_data_info_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 2;
+            _Bool field : 1;
+        } bits;
+        okl4_axon_data_info_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_axon_data_info_setusr, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_axon_data_info_setusr) */
+
+/*lint -esym(621, okl4_axon_data_info_setusr) */
+OKL4_FORCE_INLINE void
+okl4_axon_data_info_setusr(okl4_axon_data_info_t *x, okl4_bool_t _usr)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 2;
+            _Bool field : 1;
+        } bits;
+        okl4_axon_data_info_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_usr;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_axon_data_info_getladdr, 1p) */
+/*lint -esym(621, okl4_axon_data_info_getladdr) */
+/*lint -esym(714, okl4_axon_data_info_getladdr) */
+OKL4_FORCE_INLINE okl4_laddr_t
+okl4_axon_data_info_getladdr(const okl4_axon_data_info_t *x)
+{
+    okl4_laddr_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 3;
+            uint64_t field : 61;
+        } bits;
+        okl4_axon_data_info_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_laddr_t)_conv.bits.field;
+    return (okl4_laddr_t)(field << 3);
+}
+
+/*lint -esym(714, okl4_axon_data_info_setladdr) */
+
+/*lint -esym(621, okl4_axon_data_info_setladdr) */
+OKL4_FORCE_INLINE void
+okl4_axon_data_info_setladdr(okl4_axon_data_info_t *x, okl4_laddr_t _laddr)
+{
+    okl4_laddr_t val = _laddr >> 3;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 3;
+            uint64_t field : 61;
+        } bits;
+        okl4_axon_data_info_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint64_t)val;
+    *x = _conv.raw;
+}
+/*lint -esym(714, okl4_axon_data_info_init) */
+OKL4_FORCE_INLINE void
+okl4_axon_data_info_init(okl4_axon_data_info_t *x)
+{
+    *x = (okl4_axon_data_info_t)0U;
+}
+
+/*lint -esym(714, okl4_axon_data_info_cast) */
+OKL4_FORCE_INLINE okl4_axon_data_info_t
+okl4_axon_data_info_cast(uint64_t p, okl4_bool_t force)
+{
+    okl4_axon_data_info_t x = (okl4_axon_data_info_t)p;
+    (void)force;
+    return x;
+}
+
+
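+/*
+ * Illustrative usage sketch (not generated code): because the laddr field
+ * occupies bits 63..3, okl4_axon_data_info_setladdr() discards the low three
+ * address bits, so callers should pass 8-byte-aligned guest logical
+ * addresses; for example:
+ *
+ *     okl4_axon_data_info_t info;
+ *     okl4_axon_data_info_init(&info);
+ *     okl4_axon_data_info_setladdr(&info, (okl4_laddr_t)0x80001000U);
+ *     okl4_axon_data_info_setpending(&info, (okl4_bool_t)1);
+ *     // okl4_axon_data_info_getladdr(&info) now returns 0x80001000
+ */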
+
+/**
+
+*/
+
+struct okl4_axon_ep_data {
+    struct okl4_axon_data rx;
+    struct okl4_axon_data tx;
+};
+
+
+
+
+
+
+
+
+
+typedef char _okl4_padding_t;
+
+
+
+
+
+struct okl4_axon_queue {
+    uint32_t queue_offset;
+    uint16_t entries;
+    volatile uint16_t kptr;
+    volatile uint16_t uptr;
+    _okl4_padding_t __padding0_2; /**< Padding 4 */
+    _okl4_padding_t __padding1_3; /**< Padding 4 */
+};
+
+
+
+
+
+
+/**
+    The `okl4_ksize_t` type represents an unsigned integer value which is large
+    enough to represent the size of any kernel-accessible memory object.
+*/
+
+typedef okl4_lsize_t okl4_ksize_t;
+
+
+
+
+
+struct okl4_axon_queue_entry {
+    okl4_axon_data_info_t info;
+    okl4_ksize_t data_size;
+    uint32_t recv_sequence;
+    _okl4_padding_t __padding0_4; /**< Padding 8 */
+    _okl4_padding_t __padding1_5; /**< Padding 8 */
+    _okl4_padding_t __padding2_6; /**< Padding 8 */
+    _okl4_padding_t __padding3_7; /**< Padding 8 */
+};
+
+
+
+
+
+
+/**
+    - BITS 4..0 -   @ref OKL4_MASK_ALLOC_ORDER_AXON_QUEUE_SIZE
+    - BITS 12..8 -   @ref OKL4_MASK_MIN_ORDER_AXON_QUEUE_SIZE
+*/
+
+/*lint -esym(621, okl4_axon_queue_size_t) */
+typedef uint16_t okl4_axon_queue_size_t;
+
+/*lint -esym(621, okl4_axon_queue_size_getallocorder) */
+/*lint -esym(714, okl4_axon_queue_size_getallocorder) */
+OKL4_FORCE_INLINE okl4_count_t
+okl4_axon_queue_size_getallocorder(const okl4_axon_queue_size_t *x);
+
+/*lint -esym(621, okl4_axon_queue_size_setallocorder) */
+OKL4_FORCE_INLINE void
+okl4_axon_queue_size_setallocorder(okl4_axon_queue_size_t *x, okl4_count_t _alloc_order);
+
+/*lint -esym(621, okl4_axon_queue_size_getminorder) */
+/*lint -esym(714, okl4_axon_queue_size_getminorder) */
+OKL4_FORCE_INLINE okl4_count_t
+okl4_axon_queue_size_getminorder(const okl4_axon_queue_size_t *x);
+
+/*lint -esym(621, okl4_axon_queue_size_setminorder) */
+OKL4_FORCE_INLINE void
+okl4_axon_queue_size_setminorder(okl4_axon_queue_size_t *x, okl4_count_t _min_order);
+
+/*lint -esym(714, okl4_axon_queue_size_init) */
+OKL4_FORCE_INLINE void
+okl4_axon_queue_size_init(okl4_axon_queue_size_t *x);
+
+/*lint -esym(714, okl4_axon_queue_size_cast) */
+OKL4_FORCE_INLINE okl4_axon_queue_size_t
+okl4_axon_queue_size_cast(uint16_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_AXON_QUEUE_SIZE_ALLOC_ORDER_MASK) */
+#define OKL4_AXON_QUEUE_SIZE_ALLOC_ORDER_MASK (okl4_axon_queue_size_t)(31U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_ALLOC_ORDER_AXON_QUEUE_SIZE) */
+#define OKL4_MASK_ALLOC_ORDER_AXON_QUEUE_SIZE (okl4_axon_queue_size_t)(31U)
+/*lint -esym(621, OKL4_SHIFT_ALLOC_ORDER_AXON_QUEUE_SIZE) */
+#define OKL4_SHIFT_ALLOC_ORDER_AXON_QUEUE_SIZE (0)
+/*lint -esym(621, OKL4_WIDTH_ALLOC_ORDER_AXON_QUEUE_SIZE) */
+#define OKL4_WIDTH_ALLOC_ORDER_AXON_QUEUE_SIZE (5)
+/*lint -esym(621, OKL4_AXON_QUEUE_SIZE_MIN_ORDER_MASK) */
+#define OKL4_AXON_QUEUE_SIZE_MIN_ORDER_MASK (okl4_axon_queue_size_t)(31U << 8) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_MIN_ORDER_AXON_QUEUE_SIZE) */
+#define OKL4_MASK_MIN_ORDER_AXON_QUEUE_SIZE (okl4_axon_queue_size_t)(31U << 8)
+/*lint -esym(621, OKL4_SHIFT_MIN_ORDER_AXON_QUEUE_SIZE) */
+#define OKL4_SHIFT_MIN_ORDER_AXON_QUEUE_SIZE (8)
+/*lint -esym(621, OKL4_WIDTH_MIN_ORDER_AXON_QUEUE_SIZE) */
+#define OKL4_WIDTH_MIN_ORDER_AXON_QUEUE_SIZE (5)
+
+
+/*lint -sem(okl4_axon_queue_size_getallocorder, 1p, @n >= 0 && @n <= 31) */
+/*lint -esym(621, okl4_axon_queue_size_getallocorder) */
+/*lint -esym(714, okl4_axon_queue_size_getallocorder) */
+OKL4_FORCE_INLINE okl4_count_t
+okl4_axon_queue_size_getallocorder(const okl4_axon_queue_size_t *x)
+{
+    okl4_count_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 5;
+        } bits;
+        okl4_axon_queue_size_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_count_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_axon_queue_size_setallocorder, 2n >= 0 && 2n <= 31) */
+/*lint -esym(714, okl4_axon_queue_size_setallocorder) */
+
+/*lint -esym(621, okl4_axon_queue_size_setallocorder) */
+OKL4_FORCE_INLINE void
+okl4_axon_queue_size_setallocorder(okl4_axon_queue_size_t *x, okl4_count_t _alloc_order)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 5;
+        } bits;
+        okl4_axon_queue_size_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_alloc_order;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_axon_queue_size_getminorder, 1p, @n >= 0 && @n <= 31) */
+/*lint -esym(621, okl4_axon_queue_size_getminorder) */
+/*lint -esym(714, okl4_axon_queue_size_getminorder) */
+OKL4_FORCE_INLINE okl4_count_t
+okl4_axon_queue_size_getminorder(const okl4_axon_queue_size_t *x)
+{
+    okl4_count_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 8;
+            uint32_t field : 5;
+        } bits;
+        okl4_axon_queue_size_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_count_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_axon_queue_size_setminorder, 2n >= 0 && 2n <= 31) */
+/*lint -esym(714, okl4_axon_queue_size_setminorder) */
+
+/*lint -esym(621, okl4_axon_queue_size_setminorder) */
+OKL4_FORCE_INLINE void
+okl4_axon_queue_size_setminorder(okl4_axon_queue_size_t *x, okl4_count_t _min_order)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 8;
+            uint32_t field : 5;
+        } bits;
+        okl4_axon_queue_size_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_min_order;
+    *x = _conv.raw;
+}
+/*lint -esym(714, okl4_axon_queue_size_init) */
+OKL4_FORCE_INLINE void
+okl4_axon_queue_size_init(okl4_axon_queue_size_t *x)
+{
+    *x = (okl4_axon_queue_size_t)0U;
+}
+
+/*lint -esym(714, okl4_axon_queue_size_cast) */
+OKL4_FORCE_INLINE okl4_axon_queue_size_t
+okl4_axon_queue_size_cast(uint16_t p, okl4_bool_t force)
+{
+    okl4_axon_queue_size_t x = (okl4_axon_queue_size_t)p;
+    (void)force;
+    return x;
+}
+
+
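+/*
+ * Illustrative usage sketch (assumption: the two 5-bit fields record log2
+ * buffer sizes; the exact semantics are defined by the Axon implementation,
+ * not by this header): a caller packing a queue size might write:
+ *
+ *     okl4_axon_queue_size_t qsize;
+ *     okl4_axon_queue_size_init(&qsize);
+ *     okl4_axon_queue_size_setallocorder(&qsize, 12U);  // assumed 4 KiB allocations
+ *     okl4_axon_queue_size_setminorder(&qsize, 5U);     // assumed 32-byte minimum
+ */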
+
+
+struct okl4_axon_rx {
+    struct okl4_axon_queue queues[4];
+    okl4_axon_queue_size_t queue_sizes[4];
+};
+
+
+
+
+
+
+
+struct okl4_axon_tx {
+    struct okl4_axon_queue queues[4];
+};
+
+
+
+
+
+
+
+typedef okl4_register_t okl4_virq_flags_t;
+
+
+
+
+/**
+    - BIT 0 -   @ref OKL4_MASK_READY_AXON_VIRQ_FLAGS
+    - BIT 1 -   @ref OKL4_MASK_FAULT_AXON_VIRQ_FLAGS
+*/
+
+/*lint -esym(621, okl4_axon_virq_flags_t) */
+typedef okl4_virq_flags_t okl4_axon_virq_flags_t;
+
+/*lint -esym(621, okl4_axon_virq_flags_getready) */
+/*lint -esym(714, okl4_axon_virq_flags_getready) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_axon_virq_flags_getready(const okl4_axon_virq_flags_t *x);
+
+/*lint -esym(621, okl4_axon_virq_flags_setready) */
+OKL4_FORCE_INLINE void
+okl4_axon_virq_flags_setready(okl4_axon_virq_flags_t *x, okl4_bool_t _ready);
+
+/*lint -esym(621, okl4_axon_virq_flags_getfault) */
+/*lint -esym(714, okl4_axon_virq_flags_getfault) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_axon_virq_flags_getfault(const okl4_axon_virq_flags_t *x);
+
+/*lint -esym(621, okl4_axon_virq_flags_setfault) */
+OKL4_FORCE_INLINE void
+okl4_axon_virq_flags_setfault(okl4_axon_virq_flags_t *x, okl4_bool_t _fault);
+
+/*lint -esym(714, okl4_axon_virq_flags_init) */
+OKL4_FORCE_INLINE void
+okl4_axon_virq_flags_init(okl4_axon_virq_flags_t *x);
+
+/*lint -esym(714, okl4_axon_virq_flags_cast) */
+OKL4_FORCE_INLINE okl4_axon_virq_flags_t
+okl4_axon_virq_flags_cast(uint64_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_AXON_VIRQ_FLAGS_READY_MASK) */
+#define OKL4_AXON_VIRQ_FLAGS_READY_MASK ((okl4_axon_virq_flags_t)1U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_READY_AXON_VIRQ_FLAGS) */
+#define OKL4_MASK_READY_AXON_VIRQ_FLAGS ((okl4_axon_virq_flags_t)1U)
+/*lint -esym(621, OKL4_SHIFT_READY_AXON_VIRQ_FLAGS) */
+#define OKL4_SHIFT_READY_AXON_VIRQ_FLAGS (0)
+/*lint -esym(621, OKL4_WIDTH_READY_AXON_VIRQ_FLAGS) */
+#define OKL4_WIDTH_READY_AXON_VIRQ_FLAGS (1)
+/*lint -esym(621, OKL4_AXON_VIRQ_FLAGS_FAULT_MASK) */
+#define OKL4_AXON_VIRQ_FLAGS_FAULT_MASK ((okl4_axon_virq_flags_t)1U << 1) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_FAULT_AXON_VIRQ_FLAGS) */
+#define OKL4_MASK_FAULT_AXON_VIRQ_FLAGS ((okl4_axon_virq_flags_t)1U << 1)
+/*lint -esym(621, OKL4_SHIFT_FAULT_AXON_VIRQ_FLAGS) */
+#define OKL4_SHIFT_FAULT_AXON_VIRQ_FLAGS (1)
+/*lint -esym(621, OKL4_WIDTH_FAULT_AXON_VIRQ_FLAGS) */
+#define OKL4_WIDTH_FAULT_AXON_VIRQ_FLAGS (1)
+
+
+/*lint -sem(okl4_axon_virq_flags_getready, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_axon_virq_flags_getready) */
+/*lint -esym(714, okl4_axon_virq_flags_getready) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_axon_virq_flags_getready(const okl4_axon_virq_flags_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            _Bool field : 1;
+        } bits;
+        okl4_axon_virq_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_axon_virq_flags_setready, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_axon_virq_flags_setready) */
+
+/*lint -esym(621, okl4_axon_virq_flags_setready) */
+OKL4_FORCE_INLINE void
+okl4_axon_virq_flags_setready(okl4_axon_virq_flags_t *x, okl4_bool_t _ready)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            _Bool field : 1;
+        } bits;
+        okl4_axon_virq_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_ready;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_axon_virq_flags_getfault, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_axon_virq_flags_getfault) */
+/*lint -esym(714, okl4_axon_virq_flags_getfault) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_axon_virq_flags_getfault(const okl4_axon_virq_flags_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 1;
+            _Bool field : 1;
+        } bits;
+        okl4_axon_virq_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_axon_virq_flags_setfault, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_axon_virq_flags_setfault) */
+
+/*lint -esym(621, okl4_axon_virq_flags_setfault) */
+OKL4_FORCE_INLINE void
+okl4_axon_virq_flags_setfault(okl4_axon_virq_flags_t *x, okl4_bool_t _fault)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 1;
+            _Bool field : 1;
+        } bits;
+        okl4_axon_virq_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_fault;
+    *x = _conv.raw;
+}
+/*lint -esym(714, okl4_axon_virq_flags_init) */
+OKL4_FORCE_INLINE void
+okl4_axon_virq_flags_init(okl4_axon_virq_flags_t *x)
+{
+    *x = (okl4_axon_virq_flags_t)0U;
+}
+
+/*lint -esym(714, okl4_axon_virq_flags_cast) */
+OKL4_FORCE_INLINE okl4_axon_virq_flags_t
+okl4_axon_virq_flags_cast(uint64_t p, okl4_bool_t force)
+{
+    okl4_axon_virq_flags_t x = (okl4_axon_virq_flags_t)p;
+    (void)force;
+    return x;
+}
+
+
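+/*
+ * Illustrative usage sketch (assumption: the raw payload delivered with an
+ * Axon vIRQ carries these flag bits; payload below is a hypothetical
+ * uint64_t obtained from the interrupt handler): decode it with the cast
+ * helper and the field getters above:
+ *
+ *     okl4_axon_virq_flags_t flags = okl4_axon_virq_flags_cast(payload, (okl4_bool_t)0);
+ *     if (okl4_axon_virq_flags_getfault(&flags)) {
+ *         // handle the Axon fault condition
+ *     } else if (okl4_axon_virq_flags_getready(&flags)) {
+ *         // the queue has become ready for transfer
+ *     }
+ */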
+
+/**
+    The `okl4_page_cache_t` object represents a set of attributes that
+    controls the caching behaviour of memory page mappings.
+
+    - @ref OKL4_PAGE_CACHE_WRITECOMBINE
+    - @ref OKL4_PAGE_CACHE_DEFAULT
+    - @ref OKL4_PAGE_CACHE_IPC_RX
+    - @ref OKL4_PAGE_CACHE_IPC_TX
+    - @ref OKL4_PAGE_CACHE_TRACEBUFFER
+    - @ref OKL4_PAGE_CACHE_WRITEBACK
+    - @ref OKL4_PAGE_CACHE_IWB_RWA_ONC
+    - @ref OKL4_PAGE_CACHE_WRITETHROUGH
+    - @ref OKL4_PAGE_CACHE_DEVICE_GRE
+    - @ref OKL4_PAGE_CACHE_DEVICE_NGRE
+    - @ref OKL4_PAGE_CACHE_DEVICE
+    - @ref OKL4_PAGE_CACHE_STRONG
+    - @ref OKL4_PAGE_CACHE_HW_DEVICE_NGNRNE
+    - @ref OKL4_PAGE_CACHE_HW_MASK
+    - @ref OKL4_PAGE_CACHE_HW_DEVICE_NGNRE
+    - @ref OKL4_PAGE_CACHE_HW_DEVICE_NGRE
+    - @ref OKL4_PAGE_CACHE_HW_DEVICE_GRE
+    - @ref OKL4_PAGE_CACHE_HW_TWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_TWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_TWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_ONC_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_ONC_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_ONC_NSH
+    - @ref OKL4_PAGE_CACHE_HW_NC_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_ONC_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_ONC_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_ONC_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_ONC_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_ONC_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_ONC_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_ONC_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_ONC_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_ONC_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_ONC_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_ONC_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_TWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_TWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_TWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWT_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_WT_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWT_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWT_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWT_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWT_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWT_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_WT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_WT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_WT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWB_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWB_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWB_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWB_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_WB_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWB_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWB_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_WB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_WB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_WB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_TWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_TWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_TWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_ONC_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_ONC_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_ONC_OSH
+    - @ref OKL4_PAGE_CACHE_HW_NC_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_ONC_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_ONC_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_ONC_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_ONC_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_ONC_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_ONC_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_ONC_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_ONC_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_ONC_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_ONC_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_ONC_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_TWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_TWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_TWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWT_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_WT_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWT_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWT_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWT_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWT_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWT_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_WT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_WT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_WT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWB_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWB_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWB_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWB_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_WB_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWB_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWB_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_WB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_WB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_WB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_TWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_TWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_TWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_ONC_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_ONC_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_ONC_ISH
+    - @ref OKL4_PAGE_CACHE_HW_NC_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_ONC_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_ONC_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_ONC_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_ONC_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_ONC_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_ONC_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_ONC_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_ONC_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_ONC_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_ONC_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_ONC_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_TWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_TWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_TWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWT_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_WT_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWT_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWT_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWT_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWT_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWT_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_WT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_WT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_WT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWB_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWB_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWB_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWB_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_WB_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWB_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWB_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_WB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_WB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_WB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_MAX
+    - @ref OKL4_PAGE_CACHE_INVALID
+*/
+
+typedef okl4_count_t okl4_page_cache_t;
+
+/*lint -esym(621, OKL4_PAGE_CACHE_WRITECOMBINE) */
+#define OKL4_PAGE_CACHE_WRITECOMBINE ((okl4_page_cache_t)0x0U)
+/*lint -esym(621, OKL4_PAGE_CACHE_DEFAULT) */
+#define OKL4_PAGE_CACHE_DEFAULT ((okl4_page_cache_t)0x1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_IPC_RX) */
+#define OKL4_PAGE_CACHE_IPC_RX ((okl4_page_cache_t)0x1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_IPC_TX) */
+#define OKL4_PAGE_CACHE_IPC_TX ((okl4_page_cache_t)0x1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_TRACEBUFFER) */
+#define OKL4_PAGE_CACHE_TRACEBUFFER ((okl4_page_cache_t)0x1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_WRITEBACK) */
+#define OKL4_PAGE_CACHE_WRITEBACK ((okl4_page_cache_t)0x1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_IWB_RWA_ONC) */
+#define OKL4_PAGE_CACHE_IWB_RWA_ONC ((okl4_page_cache_t)0x2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_WRITETHROUGH) */
+#define OKL4_PAGE_CACHE_WRITETHROUGH ((okl4_page_cache_t)0x3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_DEVICE_GRE) */
+#define OKL4_PAGE_CACHE_DEVICE_GRE ((okl4_page_cache_t)0x4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_DEVICE_NGRE) */
+#define OKL4_PAGE_CACHE_DEVICE_NGRE ((okl4_page_cache_t)0x5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_DEVICE) */
+#define OKL4_PAGE_CACHE_DEVICE ((okl4_page_cache_t)0x6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_STRONG) */
+#define OKL4_PAGE_CACHE_STRONG ((okl4_page_cache_t)0x7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_DEVICE_NGNRNE) */
+#define OKL4_PAGE_CACHE_HW_DEVICE_NGNRNE ((okl4_page_cache_t)0x8000000U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_MASK) */
+#define OKL4_PAGE_CACHE_HW_MASK ((okl4_page_cache_t)0x8000000U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_DEVICE_NGNRE) */
+#define OKL4_PAGE_CACHE_HW_DEVICE_NGNRE ((okl4_page_cache_t)0x8000004U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_DEVICE_NGRE) */
+#define OKL4_PAGE_CACHE_HW_DEVICE_NGRE ((okl4_page_cache_t)0x8000008U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_DEVICE_GRE) */
+#define OKL4_PAGE_CACHE_HW_DEVICE_GRE ((okl4_page_cache_t)0x800000cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_TWT_WA_NSH ((okl4_page_cache_t)0x8000011U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_NSH ((okl4_page_cache_t)0x8000012U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_NSH ((okl4_page_cache_t)0x8000013U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWT_WA_NSH ((okl4_page_cache_t)0x8000014U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_NSH ((okl4_page_cache_t)0x8000015U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_NSH ((okl4_page_cache_t)0x8000016U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_NSH ((okl4_page_cache_t)0x8000017U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_WA_NSH ((okl4_page_cache_t)0x8000018U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_WA_NSH ((okl4_page_cache_t)0x8000019U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_WA_NSH ((okl4_page_cache_t)0x800001aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_NSH ((okl4_page_cache_t)0x800001bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_WA_NSH ((okl4_page_cache_t)0x800001cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_WA_NSH ((okl4_page_cache_t)0x800001dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_WA_NSH ((okl4_page_cache_t)0x800001eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_NSH ((okl4_page_cache_t)0x800001fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_NSH ((okl4_page_cache_t)0x8000021U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_TWT_RA_NSH ((okl4_page_cache_t)0x8000022U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_NSH ((okl4_page_cache_t)0x8000023U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWT_RA_NSH ((okl4_page_cache_t)0x8000024U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_NSH ((okl4_page_cache_t)0x8000025U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_NSH ((okl4_page_cache_t)0x8000026U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_NSH ((okl4_page_cache_t)0x8000027U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RA_NSH ((okl4_page_cache_t)0x8000028U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RA_NSH ((okl4_page_cache_t)0x8000029U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RA_NSH ((okl4_page_cache_t)0x800002aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_NSH ((okl4_page_cache_t)0x800002bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RA_NSH ((okl4_page_cache_t)0x800002cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RA_NSH ((okl4_page_cache_t)0x800002dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RA_NSH ((okl4_page_cache_t)0x800002eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_NSH ((okl4_page_cache_t)0x800002fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_NSH ((okl4_page_cache_t)0x8000031U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_NSH ((okl4_page_cache_t)0x8000032U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_TWT_RWA_NSH ((okl4_page_cache_t)0x8000033U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWT_RWA_NSH ((okl4_page_cache_t)0x8000034U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_NSH ((okl4_page_cache_t)0x8000035U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_NSH ((okl4_page_cache_t)0x8000036U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_NSH ((okl4_page_cache_t)0x8000037U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_NSH ((okl4_page_cache_t)0x8000038U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_NSH ((okl4_page_cache_t)0x8000039U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_NSH ((okl4_page_cache_t)0x800003aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_NSH ((okl4_page_cache_t)0x800003bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_NSH ((okl4_page_cache_t)0x800003cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_NSH ((okl4_page_cache_t)0x800003dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_NSH ((okl4_page_cache_t)0x800003eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_NSH ((okl4_page_cache_t)0x800003fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_ONC_NSH ((okl4_page_cache_t)0x8000041U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_ONC_NSH ((okl4_page_cache_t)0x8000042U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_ONC_NSH ((okl4_page_cache_t)0x8000043U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_NC_NSH) */
+#define OKL4_PAGE_CACHE_HW_NC_NSH ((okl4_page_cache_t)0x8000044U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_ONC_NSH ((okl4_page_cache_t)0x8000045U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_ONC_NSH ((okl4_page_cache_t)0x8000046U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_ONC_NSH ((okl4_page_cache_t)0x8000047U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_ONC_NSH ((okl4_page_cache_t)0x8000048U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_ONC_NSH ((okl4_page_cache_t)0x8000049U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_ONC_NSH ((okl4_page_cache_t)0x800004aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_ONC_NSH ((okl4_page_cache_t)0x800004bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_ONC_NSH ((okl4_page_cache_t)0x800004cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_ONC_NSH ((okl4_page_cache_t)0x800004dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_ONC_NSH ((okl4_page_cache_t)0x800004eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_ONC_NSH ((okl4_page_cache_t)0x800004fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_NSH ((okl4_page_cache_t)0x8000051U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_NSH ((okl4_page_cache_t)0x8000052U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_NSH ((okl4_page_cache_t)0x8000053U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWB_WA_NSH ((okl4_page_cache_t)0x8000054U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_TWB_WA_NSH ((okl4_page_cache_t)0x8000055U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_NSH ((okl4_page_cache_t)0x8000056U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_NSH ((okl4_page_cache_t)0x8000057U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_WA_NSH ((okl4_page_cache_t)0x8000058U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_WA_NSH ((okl4_page_cache_t)0x8000059U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_WA_NSH ((okl4_page_cache_t)0x800005aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_NSH ((okl4_page_cache_t)0x800005bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_WA_NSH ((okl4_page_cache_t)0x800005cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_WA_NSH ((okl4_page_cache_t)0x800005dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_WA_NSH ((okl4_page_cache_t)0x800005eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_NSH ((okl4_page_cache_t)0x800005fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_NSH ((okl4_page_cache_t)0x8000061U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_NSH ((okl4_page_cache_t)0x8000062U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_NSH ((okl4_page_cache_t)0x8000063U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWB_RA_NSH ((okl4_page_cache_t)0x8000064U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_NSH ((okl4_page_cache_t)0x8000065U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_TWB_RA_NSH ((okl4_page_cache_t)0x8000066U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_NSH ((okl4_page_cache_t)0x8000067U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RA_NSH ((okl4_page_cache_t)0x8000068U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RA_NSH ((okl4_page_cache_t)0x8000069U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RA_NSH ((okl4_page_cache_t)0x800006aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_NSH ((okl4_page_cache_t)0x800006bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RA_NSH ((okl4_page_cache_t)0x800006cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RA_NSH ((okl4_page_cache_t)0x800006dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RA_NSH ((okl4_page_cache_t)0x800006eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_NSH ((okl4_page_cache_t)0x800006fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_NSH ((okl4_page_cache_t)0x8000071U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_NSH ((okl4_page_cache_t)0x8000072U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_NSH ((okl4_page_cache_t)0x8000073U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWB_RWA_NSH ((okl4_page_cache_t)0x8000074U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_NSH ((okl4_page_cache_t)0x8000075U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_NSH ((okl4_page_cache_t)0x8000076U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_TWB_RWA_NSH ((okl4_page_cache_t)0x8000077U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_NSH ((okl4_page_cache_t)0x8000078U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_NSH ((okl4_page_cache_t)0x8000079U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_NSH ((okl4_page_cache_t)0x800007aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_NSH ((okl4_page_cache_t)0x800007bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_NSH ((okl4_page_cache_t)0x800007cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_NSH ((okl4_page_cache_t)0x800007dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_NSH ((okl4_page_cache_t)0x800007eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_NSH ((okl4_page_cache_t)0x800007fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_NA_NSH ((okl4_page_cache_t)0x8000081U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_NA_NSH ((okl4_page_cache_t)0x8000082U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_NSH ((okl4_page_cache_t)0x8000083U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWT_NA_NSH ((okl4_page_cache_t)0x8000084U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_NA_NSH ((okl4_page_cache_t)0x8000085U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_NA_NSH ((okl4_page_cache_t)0x8000086U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_NSH ((okl4_page_cache_t)0x8000087U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_WT_NA_NSH ((okl4_page_cache_t)0x8000088U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWT_NA_NSH ((okl4_page_cache_t)0x8000089U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWT_NA_NSH ((okl4_page_cache_t)0x800008aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_NA_NSH ((okl4_page_cache_t)0x800008bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWT_NA_NSH ((okl4_page_cache_t)0x800008cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWT_NA_NSH ((okl4_page_cache_t)0x800008dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWT_NA_NSH ((okl4_page_cache_t)0x800008eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_NA_NSH ((okl4_page_cache_t)0x800008fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_WA_NSH ((okl4_page_cache_t)0x8000091U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_WA_NSH ((okl4_page_cache_t)0x8000092U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_NSH ((okl4_page_cache_t)0x8000093U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWT_WA_NSH ((okl4_page_cache_t)0x8000094U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_WA_NSH ((okl4_page_cache_t)0x8000095U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_WA_NSH ((okl4_page_cache_t)0x8000096U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_NSH ((okl4_page_cache_t)0x8000097U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWT_WA_NSH ((okl4_page_cache_t)0x8000098U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_WT_WA_NSH ((okl4_page_cache_t)0x8000099U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWT_WA_NSH ((okl4_page_cache_t)0x800009aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_WA_NSH ((okl4_page_cache_t)0x800009bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWT_WA_NSH ((okl4_page_cache_t)0x800009cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWT_WA_NSH ((okl4_page_cache_t)0x800009dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWT_WA_NSH ((okl4_page_cache_t)0x800009eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_WA_NSH ((okl4_page_cache_t)0x800009fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RA_NSH ((okl4_page_cache_t)0x80000a1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RA_NSH ((okl4_page_cache_t)0x80000a2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_NSH ((okl4_page_cache_t)0x80000a3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWT_RA_NSH ((okl4_page_cache_t)0x80000a4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RA_NSH ((okl4_page_cache_t)0x80000a5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RA_NSH ((okl4_page_cache_t)0x80000a6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_NSH ((okl4_page_cache_t)0x80000a7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RA_NSH ((okl4_page_cache_t)0x80000a8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RA_NSH ((okl4_page_cache_t)0x80000a9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_WT_RA_NSH ((okl4_page_cache_t)0x80000aaU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_RA_NSH ((okl4_page_cache_t)0x80000abU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RA_NSH ((okl4_page_cache_t)0x80000acU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RA_NSH ((okl4_page_cache_t)0x80000adU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RA_NSH ((okl4_page_cache_t)0x80000aeU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RA_NSH ((okl4_page_cache_t)0x80000afU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000b1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000b2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000b3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWT_RWA_NSH ((okl4_page_cache_t)0x80000b4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000b5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000b6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000b7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000b8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000b9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000baU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_WT_RWA_NSH ((okl4_page_cache_t)0x80000bbU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000bcU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000bdU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000beU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000bfU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_NA_NSH ((okl4_page_cache_t)0x80000c1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_NA_NSH ((okl4_page_cache_t)0x80000c2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_NSH ((okl4_page_cache_t)0x80000c3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWB_NA_NSH ((okl4_page_cache_t)0x80000c4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_NA_NSH ((okl4_page_cache_t)0x80000c5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_NA_NSH ((okl4_page_cache_t)0x80000c6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_NSH ((okl4_page_cache_t)0x80000c7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWB_NA_NSH ((okl4_page_cache_t)0x80000c8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWB_NA_NSH ((okl4_page_cache_t)0x80000c9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWB_NA_NSH ((okl4_page_cache_t)0x80000caU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_NA_NSH ((okl4_page_cache_t)0x80000cbU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_WB_NA_NSH ((okl4_page_cache_t)0x80000ccU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWB_NA_NSH ((okl4_page_cache_t)0x80000cdU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWB_NA_NSH ((okl4_page_cache_t)0x80000ceU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_NA_NSH ((okl4_page_cache_t)0x80000cfU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_WA_NSH ((okl4_page_cache_t)0x80000d1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_WA_NSH ((okl4_page_cache_t)0x80000d2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_NSH ((okl4_page_cache_t)0x80000d3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWB_WA_NSH ((okl4_page_cache_t)0x80000d4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_WA_NSH ((okl4_page_cache_t)0x80000d5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_WA_NSH ((okl4_page_cache_t)0x80000d6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_NSH ((okl4_page_cache_t)0x80000d7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWB_WA_NSH ((okl4_page_cache_t)0x80000d8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWB_WA_NSH ((okl4_page_cache_t)0x80000d9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWB_WA_NSH ((okl4_page_cache_t)0x80000daU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_WA_NSH ((okl4_page_cache_t)0x80000dbU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWB_WA_NSH ((okl4_page_cache_t)0x80000dcU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_WB_WA_NSH ((okl4_page_cache_t)0x80000ddU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWB_WA_NSH ((okl4_page_cache_t)0x80000deU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_WA_NSH ((okl4_page_cache_t)0x80000dfU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RA_NSH ((okl4_page_cache_t)0x80000e1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RA_NSH ((okl4_page_cache_t)0x80000e2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_NSH ((okl4_page_cache_t)0x80000e3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWB_RA_NSH ((okl4_page_cache_t)0x80000e4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RA_NSH ((okl4_page_cache_t)0x80000e5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RA_NSH ((okl4_page_cache_t)0x80000e6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_NSH ((okl4_page_cache_t)0x80000e7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RA_NSH ((okl4_page_cache_t)0x80000e8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RA_NSH ((okl4_page_cache_t)0x80000e9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RA_NSH ((okl4_page_cache_t)0x80000eaU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RA_NSH ((okl4_page_cache_t)0x80000ebU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RA_NSH ((okl4_page_cache_t)0x80000ecU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RA_NSH ((okl4_page_cache_t)0x80000edU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_WB_RA_NSH ((okl4_page_cache_t)0x80000eeU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_RA_NSH ((okl4_page_cache_t)0x80000efU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000f1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000f2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000f3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWB_RWA_NSH ((okl4_page_cache_t)0x80000f4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000f5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000f6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000f7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000f8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000f9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000faU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000fbU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000fcU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000fdU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000feU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_WB_RWA_NSH ((okl4_page_cache_t)0x80000ffU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_TWT_WA_OSH ((okl4_page_cache_t)0x8000211U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_OSH ((okl4_page_cache_t)0x8000212U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_OSH ((okl4_page_cache_t)0x8000213U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWT_WA_OSH ((okl4_page_cache_t)0x8000214U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_OSH ((okl4_page_cache_t)0x8000215U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_OSH ((okl4_page_cache_t)0x8000216U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_OSH ((okl4_page_cache_t)0x8000217U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_WA_OSH ((okl4_page_cache_t)0x8000218U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_WA_OSH ((okl4_page_cache_t)0x8000219U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_WA_OSH ((okl4_page_cache_t)0x800021aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_OSH ((okl4_page_cache_t)0x800021bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_WA_OSH ((okl4_page_cache_t)0x800021cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_WA_OSH ((okl4_page_cache_t)0x800021dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_WA_OSH ((okl4_page_cache_t)0x800021eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_OSH ((okl4_page_cache_t)0x800021fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_OSH ((okl4_page_cache_t)0x8000221U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_TWT_RA_OSH ((okl4_page_cache_t)0x8000222U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_OSH ((okl4_page_cache_t)0x8000223U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWT_RA_OSH ((okl4_page_cache_t)0x8000224U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_OSH ((okl4_page_cache_t)0x8000225U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_OSH ((okl4_page_cache_t)0x8000226U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_OSH ((okl4_page_cache_t)0x8000227U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RA_OSH ((okl4_page_cache_t)0x8000228U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RA_OSH ((okl4_page_cache_t)0x8000229U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RA_OSH ((okl4_page_cache_t)0x800022aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_OSH ((okl4_page_cache_t)0x800022bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RA_OSH ((okl4_page_cache_t)0x800022cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RA_OSH ((okl4_page_cache_t)0x800022dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RA_OSH ((okl4_page_cache_t)0x800022eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_OSH ((okl4_page_cache_t)0x800022fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_OSH ((okl4_page_cache_t)0x8000231U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_OSH ((okl4_page_cache_t)0x8000232U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_TWT_RWA_OSH ((okl4_page_cache_t)0x8000233U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWT_RWA_OSH ((okl4_page_cache_t)0x8000234U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_OSH ((okl4_page_cache_t)0x8000235U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_OSH ((okl4_page_cache_t)0x8000236U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_OSH ((okl4_page_cache_t)0x8000237U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_OSH ((okl4_page_cache_t)0x8000238U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_OSH ((okl4_page_cache_t)0x8000239U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_OSH ((okl4_page_cache_t)0x800023aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_OSH ((okl4_page_cache_t)0x800023bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_OSH ((okl4_page_cache_t)0x800023cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_OSH ((okl4_page_cache_t)0x800023dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_OSH ((okl4_page_cache_t)0x800023eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_OSH ((okl4_page_cache_t)0x800023fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_ONC_OSH ((okl4_page_cache_t)0x8000241U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_ONC_OSH ((okl4_page_cache_t)0x8000242U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_ONC_OSH ((okl4_page_cache_t)0x8000243U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_NC_OSH) */
+#define OKL4_PAGE_CACHE_HW_NC_OSH ((okl4_page_cache_t)0x8000244U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_ONC_OSH ((okl4_page_cache_t)0x8000245U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_ONC_OSH ((okl4_page_cache_t)0x8000246U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_ONC_OSH ((okl4_page_cache_t)0x8000247U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_ONC_OSH ((okl4_page_cache_t)0x8000248U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_ONC_OSH ((okl4_page_cache_t)0x8000249U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_ONC_OSH ((okl4_page_cache_t)0x800024aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_ONC_OSH ((okl4_page_cache_t)0x800024bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_ONC_OSH ((okl4_page_cache_t)0x800024cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_ONC_OSH ((okl4_page_cache_t)0x800024dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_ONC_OSH ((okl4_page_cache_t)0x800024eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_ONC_OSH ((okl4_page_cache_t)0x800024fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_OSH ((okl4_page_cache_t)0x8000251U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_OSH ((okl4_page_cache_t)0x8000252U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_OSH ((okl4_page_cache_t)0x8000253U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWB_WA_OSH ((okl4_page_cache_t)0x8000254U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_TWB_WA_OSH ((okl4_page_cache_t)0x8000255U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_OSH ((okl4_page_cache_t)0x8000256U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_OSH ((okl4_page_cache_t)0x8000257U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_WA_OSH ((okl4_page_cache_t)0x8000258U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_WA_OSH ((okl4_page_cache_t)0x8000259U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_WA_OSH ((okl4_page_cache_t)0x800025aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_OSH ((okl4_page_cache_t)0x800025bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_WA_OSH ((okl4_page_cache_t)0x800025cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_WA_OSH ((okl4_page_cache_t)0x800025dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_WA_OSH ((okl4_page_cache_t)0x800025eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_OSH ((okl4_page_cache_t)0x800025fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_OSH ((okl4_page_cache_t)0x8000261U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_OSH ((okl4_page_cache_t)0x8000262U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_OSH ((okl4_page_cache_t)0x8000263U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWB_RA_OSH ((okl4_page_cache_t)0x8000264U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_OSH ((okl4_page_cache_t)0x8000265U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_TWB_RA_OSH ((okl4_page_cache_t)0x8000266U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_OSH ((okl4_page_cache_t)0x8000267U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RA_OSH ((okl4_page_cache_t)0x8000268U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RA_OSH ((okl4_page_cache_t)0x8000269U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RA_OSH ((okl4_page_cache_t)0x800026aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_OSH ((okl4_page_cache_t)0x800026bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RA_OSH ((okl4_page_cache_t)0x800026cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RA_OSH ((okl4_page_cache_t)0x800026dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RA_OSH ((okl4_page_cache_t)0x800026eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_OSH ((okl4_page_cache_t)0x800026fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_OSH ((okl4_page_cache_t)0x8000271U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_OSH ((okl4_page_cache_t)0x8000272U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_OSH ((okl4_page_cache_t)0x8000273U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWB_RWA_OSH ((okl4_page_cache_t)0x8000274U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_OSH ((okl4_page_cache_t)0x8000275U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_OSH ((okl4_page_cache_t)0x8000276U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_TWB_RWA_OSH ((okl4_page_cache_t)0x8000277U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_OSH ((okl4_page_cache_t)0x8000278U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_OSH ((okl4_page_cache_t)0x8000279U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_OSH ((okl4_page_cache_t)0x800027aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_OSH ((okl4_page_cache_t)0x800027bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_OSH ((okl4_page_cache_t)0x800027cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_OSH ((okl4_page_cache_t)0x800027dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_OSH ((okl4_page_cache_t)0x800027eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_OSH ((okl4_page_cache_t)0x800027fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_NA_OSH ((okl4_page_cache_t)0x8000281U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_NA_OSH ((okl4_page_cache_t)0x8000282U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_OSH ((okl4_page_cache_t)0x8000283U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWT_NA_OSH ((okl4_page_cache_t)0x8000284U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_NA_OSH ((okl4_page_cache_t)0x8000285U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_NA_OSH ((okl4_page_cache_t)0x8000286U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_OSH ((okl4_page_cache_t)0x8000287U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_WT_NA_OSH ((okl4_page_cache_t)0x8000288U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWT_NA_OSH ((okl4_page_cache_t)0x8000289U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWT_NA_OSH ((okl4_page_cache_t)0x800028aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_NA_OSH ((okl4_page_cache_t)0x800028bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWT_NA_OSH ((okl4_page_cache_t)0x800028cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWT_NA_OSH ((okl4_page_cache_t)0x800028dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWT_NA_OSH ((okl4_page_cache_t)0x800028eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_NA_OSH ((okl4_page_cache_t)0x800028fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_WA_OSH ((okl4_page_cache_t)0x8000291U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_WA_OSH ((okl4_page_cache_t)0x8000292U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_OSH ((okl4_page_cache_t)0x8000293U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWT_WA_OSH ((okl4_page_cache_t)0x8000294U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_WA_OSH ((okl4_page_cache_t)0x8000295U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_WA_OSH ((okl4_page_cache_t)0x8000296U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_OSH ((okl4_page_cache_t)0x8000297U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWT_WA_OSH ((okl4_page_cache_t)0x8000298U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_WT_WA_OSH ((okl4_page_cache_t)0x8000299U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWT_WA_OSH ((okl4_page_cache_t)0x800029aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_WA_OSH ((okl4_page_cache_t)0x800029bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWT_WA_OSH ((okl4_page_cache_t)0x800029cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWT_WA_OSH ((okl4_page_cache_t)0x800029dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWT_WA_OSH ((okl4_page_cache_t)0x800029eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_WA_OSH ((okl4_page_cache_t)0x800029fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RA_OSH ((okl4_page_cache_t)0x80002a1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RA_OSH ((okl4_page_cache_t)0x80002a2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_OSH ((okl4_page_cache_t)0x80002a3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWT_RA_OSH ((okl4_page_cache_t)0x80002a4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RA_OSH ((okl4_page_cache_t)0x80002a5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RA_OSH ((okl4_page_cache_t)0x80002a6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_OSH ((okl4_page_cache_t)0x80002a7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RA_OSH ((okl4_page_cache_t)0x80002a8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RA_OSH ((okl4_page_cache_t)0x80002a9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_WT_RA_OSH ((okl4_page_cache_t)0x80002aaU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_RA_OSH ((okl4_page_cache_t)0x80002abU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RA_OSH ((okl4_page_cache_t)0x80002acU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RA_OSH ((okl4_page_cache_t)0x80002adU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RA_OSH ((okl4_page_cache_t)0x80002aeU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RA_OSH ((okl4_page_cache_t)0x80002afU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002b1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002b2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002b3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWT_RWA_OSH ((okl4_page_cache_t)0x80002b4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002b5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002b6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002b7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002b8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002b9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002baU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_WT_RWA_OSH ((okl4_page_cache_t)0x80002bbU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002bcU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002bdU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002beU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002bfU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_NA_OSH ((okl4_page_cache_t)0x80002c1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_NA_OSH ((okl4_page_cache_t)0x80002c2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_OSH ((okl4_page_cache_t)0x80002c3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWB_NA_OSH ((okl4_page_cache_t)0x80002c4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_NA_OSH ((okl4_page_cache_t)0x80002c5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_NA_OSH ((okl4_page_cache_t)0x80002c6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_OSH ((okl4_page_cache_t)0x80002c7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWB_NA_OSH ((okl4_page_cache_t)0x80002c8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWB_NA_OSH ((okl4_page_cache_t)0x80002c9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWB_NA_OSH ((okl4_page_cache_t)0x80002caU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_NA_OSH ((okl4_page_cache_t)0x80002cbU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_WB_NA_OSH ((okl4_page_cache_t)0x80002ccU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWB_NA_OSH ((okl4_page_cache_t)0x80002cdU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWB_NA_OSH ((okl4_page_cache_t)0x80002ceU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_NA_OSH ((okl4_page_cache_t)0x80002cfU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_WA_OSH ((okl4_page_cache_t)0x80002d1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_WA_OSH ((okl4_page_cache_t)0x80002d2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_OSH ((okl4_page_cache_t)0x80002d3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWB_WA_OSH ((okl4_page_cache_t)0x80002d4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_WA_OSH ((okl4_page_cache_t)0x80002d5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_WA_OSH ((okl4_page_cache_t)0x80002d6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_OSH ((okl4_page_cache_t)0x80002d7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWB_WA_OSH ((okl4_page_cache_t)0x80002d8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWB_WA_OSH ((okl4_page_cache_t)0x80002d9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWB_WA_OSH ((okl4_page_cache_t)0x80002daU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_WA_OSH ((okl4_page_cache_t)0x80002dbU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWB_WA_OSH ((okl4_page_cache_t)0x80002dcU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_WB_WA_OSH ((okl4_page_cache_t)0x80002ddU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWB_WA_OSH ((okl4_page_cache_t)0x80002deU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_WA_OSH ((okl4_page_cache_t)0x80002dfU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RA_OSH ((okl4_page_cache_t)0x80002e1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RA_OSH ((okl4_page_cache_t)0x80002e2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_OSH ((okl4_page_cache_t)0x80002e3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWB_RA_OSH ((okl4_page_cache_t)0x80002e4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RA_OSH ((okl4_page_cache_t)0x80002e5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RA_OSH ((okl4_page_cache_t)0x80002e6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_OSH ((okl4_page_cache_t)0x80002e7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RA_OSH ((okl4_page_cache_t)0x80002e8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RA_OSH ((okl4_page_cache_t)0x80002e9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RA_OSH ((okl4_page_cache_t)0x80002eaU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RA_OSH ((okl4_page_cache_t)0x80002ebU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RA_OSH ((okl4_page_cache_t)0x80002ecU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RA_OSH ((okl4_page_cache_t)0x80002edU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_WB_RA_OSH ((okl4_page_cache_t)0x80002eeU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_RA_OSH ((okl4_page_cache_t)0x80002efU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002f1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002f2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002f3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWB_RWA_OSH ((okl4_page_cache_t)0x80002f4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002f5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002f6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002f7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002f8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002f9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002faU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002fbU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002fcU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002fdU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002feU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_WB_RWA_OSH ((okl4_page_cache_t)0x80002ffU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_TWT_WA_ISH ((okl4_page_cache_t)0x8000311U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_ISH ((okl4_page_cache_t)0x8000312U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_ISH ((okl4_page_cache_t)0x8000313U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWT_WA_ISH ((okl4_page_cache_t)0x8000314U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_ISH ((okl4_page_cache_t)0x8000315U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_ISH ((okl4_page_cache_t)0x8000316U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_ISH ((okl4_page_cache_t)0x8000317U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_WA_ISH ((okl4_page_cache_t)0x8000318U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_WA_ISH ((okl4_page_cache_t)0x8000319U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_WA_ISH ((okl4_page_cache_t)0x800031aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_ISH ((okl4_page_cache_t)0x800031bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_WA_ISH ((okl4_page_cache_t)0x800031cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_WA_ISH ((okl4_page_cache_t)0x800031dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_WA_ISH ((okl4_page_cache_t)0x800031eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_ISH ((okl4_page_cache_t)0x800031fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_ISH ((okl4_page_cache_t)0x8000321U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_TWT_RA_ISH ((okl4_page_cache_t)0x8000322U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_ISH ((okl4_page_cache_t)0x8000323U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWT_RA_ISH ((okl4_page_cache_t)0x8000324U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_ISH ((okl4_page_cache_t)0x8000325U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_ISH ((okl4_page_cache_t)0x8000326U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_ISH ((okl4_page_cache_t)0x8000327U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RA_ISH ((okl4_page_cache_t)0x8000328U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RA_ISH ((okl4_page_cache_t)0x8000329U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RA_ISH ((okl4_page_cache_t)0x800032aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_ISH ((okl4_page_cache_t)0x800032bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RA_ISH ((okl4_page_cache_t)0x800032cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RA_ISH ((okl4_page_cache_t)0x800032dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RA_ISH ((okl4_page_cache_t)0x800032eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_ISH ((okl4_page_cache_t)0x800032fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_ISH ((okl4_page_cache_t)0x8000331U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_ISH ((okl4_page_cache_t)0x8000332U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_TWT_RWA_ISH ((okl4_page_cache_t)0x8000333U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWT_RWA_ISH ((okl4_page_cache_t)0x8000334U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_ISH ((okl4_page_cache_t)0x8000335U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_ISH ((okl4_page_cache_t)0x8000336U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_ISH ((okl4_page_cache_t)0x8000337U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_ISH ((okl4_page_cache_t)0x8000338U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_ISH ((okl4_page_cache_t)0x8000339U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_ISH ((okl4_page_cache_t)0x800033aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_ISH ((okl4_page_cache_t)0x800033bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_ISH ((okl4_page_cache_t)0x800033cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_ISH ((okl4_page_cache_t)0x800033dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_ISH ((okl4_page_cache_t)0x800033eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_ISH ((okl4_page_cache_t)0x800033fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_ONC_ISH ((okl4_page_cache_t)0x8000341U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_ONC_ISH ((okl4_page_cache_t)0x8000342U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_ONC_ISH ((okl4_page_cache_t)0x8000343U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_NC_ISH) */
+#define OKL4_PAGE_CACHE_HW_NC_ISH ((okl4_page_cache_t)0x8000344U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_ONC_ISH ((okl4_page_cache_t)0x8000345U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_ONC_ISH ((okl4_page_cache_t)0x8000346U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_ONC_ISH ((okl4_page_cache_t)0x8000347U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_ONC_ISH ((okl4_page_cache_t)0x8000348U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_ONC_ISH ((okl4_page_cache_t)0x8000349U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_ONC_ISH ((okl4_page_cache_t)0x800034aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_ONC_ISH ((okl4_page_cache_t)0x800034bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_ONC_ISH ((okl4_page_cache_t)0x800034cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_ONC_ISH ((okl4_page_cache_t)0x800034dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_ONC_ISH ((okl4_page_cache_t)0x800034eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_ONC_ISH ((okl4_page_cache_t)0x800034fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_ISH ((okl4_page_cache_t)0x8000351U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_ISH ((okl4_page_cache_t)0x8000352U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_ISH ((okl4_page_cache_t)0x8000353U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWB_WA_ISH ((okl4_page_cache_t)0x8000354U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_TWB_WA_ISH ((okl4_page_cache_t)0x8000355U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_ISH ((okl4_page_cache_t)0x8000356U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_ISH ((okl4_page_cache_t)0x8000357U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_WA_ISH ((okl4_page_cache_t)0x8000358U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_WA_ISH ((okl4_page_cache_t)0x8000359U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_WA_ISH ((okl4_page_cache_t)0x800035aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_ISH ((okl4_page_cache_t)0x800035bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_WA_ISH ((okl4_page_cache_t)0x800035cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_WA_ISH ((okl4_page_cache_t)0x800035dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_WA_ISH ((okl4_page_cache_t)0x800035eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_ISH ((okl4_page_cache_t)0x800035fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_ISH ((okl4_page_cache_t)0x8000361U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_ISH ((okl4_page_cache_t)0x8000362U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_ISH ((okl4_page_cache_t)0x8000363U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWB_RA_ISH ((okl4_page_cache_t)0x8000364U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_ISH ((okl4_page_cache_t)0x8000365U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_TWB_RA_ISH ((okl4_page_cache_t)0x8000366U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_ISH ((okl4_page_cache_t)0x8000367U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RA_ISH ((okl4_page_cache_t)0x8000368U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RA_ISH ((okl4_page_cache_t)0x8000369U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RA_ISH ((okl4_page_cache_t)0x800036aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_ISH ((okl4_page_cache_t)0x800036bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RA_ISH ((okl4_page_cache_t)0x800036cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RA_ISH ((okl4_page_cache_t)0x800036dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RA_ISH ((okl4_page_cache_t)0x800036eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_ISH ((okl4_page_cache_t)0x800036fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_ISH ((okl4_page_cache_t)0x8000371U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_ISH ((okl4_page_cache_t)0x8000372U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_ISH ((okl4_page_cache_t)0x8000373U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWB_RWA_ISH ((okl4_page_cache_t)0x8000374U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_ISH ((okl4_page_cache_t)0x8000375U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_ISH ((okl4_page_cache_t)0x8000376U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_TWB_RWA_ISH ((okl4_page_cache_t)0x8000377U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_ISH ((okl4_page_cache_t)0x8000378U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_ISH ((okl4_page_cache_t)0x8000379U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_ISH ((okl4_page_cache_t)0x800037aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_ISH ((okl4_page_cache_t)0x800037bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_ISH ((okl4_page_cache_t)0x800037cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_ISH ((okl4_page_cache_t)0x800037dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_ISH ((okl4_page_cache_t)0x800037eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_ISH ((okl4_page_cache_t)0x800037fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_NA_ISH ((okl4_page_cache_t)0x8000381U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_NA_ISH ((okl4_page_cache_t)0x8000382U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_ISH ((okl4_page_cache_t)0x8000383U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWT_NA_ISH ((okl4_page_cache_t)0x8000384U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_NA_ISH ((okl4_page_cache_t)0x8000385U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_NA_ISH ((okl4_page_cache_t)0x8000386U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_ISH ((okl4_page_cache_t)0x8000387U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_WT_NA_ISH ((okl4_page_cache_t)0x8000388U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWT_NA_ISH ((okl4_page_cache_t)0x8000389U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWT_NA_ISH ((okl4_page_cache_t)0x800038aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_NA_ISH ((okl4_page_cache_t)0x800038bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWT_NA_ISH ((okl4_page_cache_t)0x800038cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWT_NA_ISH ((okl4_page_cache_t)0x800038dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWT_NA_ISH ((okl4_page_cache_t)0x800038eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_NA_ISH ((okl4_page_cache_t)0x800038fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_WA_ISH ((okl4_page_cache_t)0x8000391U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_WA_ISH ((okl4_page_cache_t)0x8000392U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_ISH ((okl4_page_cache_t)0x8000393U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWT_WA_ISH ((okl4_page_cache_t)0x8000394U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_WA_ISH ((okl4_page_cache_t)0x8000395U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_WA_ISH ((okl4_page_cache_t)0x8000396U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_ISH ((okl4_page_cache_t)0x8000397U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWT_WA_ISH ((okl4_page_cache_t)0x8000398U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_WT_WA_ISH ((okl4_page_cache_t)0x8000399U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWT_WA_ISH ((okl4_page_cache_t)0x800039aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_WA_ISH ((okl4_page_cache_t)0x800039bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWT_WA_ISH ((okl4_page_cache_t)0x800039cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWT_WA_ISH ((okl4_page_cache_t)0x800039dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWT_WA_ISH ((okl4_page_cache_t)0x800039eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_WA_ISH ((okl4_page_cache_t)0x800039fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RA_ISH ((okl4_page_cache_t)0x80003a1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RA_ISH ((okl4_page_cache_t)0x80003a2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_ISH ((okl4_page_cache_t)0x80003a3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWT_RA_ISH ((okl4_page_cache_t)0x80003a4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RA_ISH ((okl4_page_cache_t)0x80003a5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RA_ISH ((okl4_page_cache_t)0x80003a6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_ISH ((okl4_page_cache_t)0x80003a7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RA_ISH ((okl4_page_cache_t)0x80003a8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RA_ISH ((okl4_page_cache_t)0x80003a9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_WT_RA_ISH ((okl4_page_cache_t)0x80003aaU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_RA_ISH ((okl4_page_cache_t)0x80003abU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RA_ISH ((okl4_page_cache_t)0x80003acU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RA_ISH ((okl4_page_cache_t)0x80003adU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RA_ISH ((okl4_page_cache_t)0x80003aeU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RA_ISH ((okl4_page_cache_t)0x80003afU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003b1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003b2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003b3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWT_RWA_ISH ((okl4_page_cache_t)0x80003b4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003b5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003b6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003b7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003b8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003b9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003baU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_WT_RWA_ISH ((okl4_page_cache_t)0x80003bbU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003bcU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003bdU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003beU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003bfU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_NA_ISH ((okl4_page_cache_t)0x80003c1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_NA_ISH ((okl4_page_cache_t)0x80003c2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_ISH ((okl4_page_cache_t)0x80003c3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWB_NA_ISH ((okl4_page_cache_t)0x80003c4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_NA_ISH ((okl4_page_cache_t)0x80003c5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_NA_ISH ((okl4_page_cache_t)0x80003c6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_ISH ((okl4_page_cache_t)0x80003c7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWB_NA_ISH ((okl4_page_cache_t)0x80003c8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWB_NA_ISH ((okl4_page_cache_t)0x80003c9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWB_NA_ISH ((okl4_page_cache_t)0x80003caU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_NA_ISH ((okl4_page_cache_t)0x80003cbU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_WB_NA_ISH ((okl4_page_cache_t)0x80003ccU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWB_NA_ISH ((okl4_page_cache_t)0x80003cdU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWB_NA_ISH ((okl4_page_cache_t)0x80003ceU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_NA_ISH ((okl4_page_cache_t)0x80003cfU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_WA_ISH ((okl4_page_cache_t)0x80003d1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_WA_ISH ((okl4_page_cache_t)0x80003d2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_ISH ((okl4_page_cache_t)0x80003d3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWB_WA_ISH ((okl4_page_cache_t)0x80003d4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_WA_ISH ((okl4_page_cache_t)0x80003d5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_WA_ISH ((okl4_page_cache_t)0x80003d6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_ISH ((okl4_page_cache_t)0x80003d7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWB_WA_ISH ((okl4_page_cache_t)0x80003d8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWB_WA_ISH ((okl4_page_cache_t)0x80003d9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWB_WA_ISH ((okl4_page_cache_t)0x80003daU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_WA_ISH ((okl4_page_cache_t)0x80003dbU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWB_WA_ISH ((okl4_page_cache_t)0x80003dcU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_WB_WA_ISH ((okl4_page_cache_t)0x80003ddU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWB_WA_ISH ((okl4_page_cache_t)0x80003deU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_WA_ISH ((okl4_page_cache_t)0x80003dfU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RA_ISH ((okl4_page_cache_t)0x80003e1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RA_ISH ((okl4_page_cache_t)0x80003e2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_ISH ((okl4_page_cache_t)0x80003e3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWB_RA_ISH ((okl4_page_cache_t)0x80003e4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RA_ISH ((okl4_page_cache_t)0x80003e5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RA_ISH ((okl4_page_cache_t)0x80003e6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_ISH ((okl4_page_cache_t)0x80003e7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RA_ISH ((okl4_page_cache_t)0x80003e8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RA_ISH ((okl4_page_cache_t)0x80003e9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RA_ISH ((okl4_page_cache_t)0x80003eaU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RA_ISH ((okl4_page_cache_t)0x80003ebU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RA_ISH ((okl4_page_cache_t)0x80003ecU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RA_ISH ((okl4_page_cache_t)0x80003edU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_WB_RA_ISH ((okl4_page_cache_t)0x80003eeU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_RA_ISH ((okl4_page_cache_t)0x80003efU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003f1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003f2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003f3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWB_RWA_ISH ((okl4_page_cache_t)0x80003f4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003f5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003f6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003f7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003f8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003f9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003faU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003fbU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003fcU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003fdU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003feU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_WB_RWA_ISH ((okl4_page_cache_t)0x80003ffU)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_PAGE_CACHE_MAX) */
+#define OKL4_PAGE_CACHE_MAX ((okl4_page_cache_t)0x80003ffU)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_PAGE_CACHE_INVALID) */
+#define OKL4_PAGE_CACHE_INVALID ((okl4_page_cache_t)0xffffffffU)
+
+/*lint -esym(714, okl4_page_cache_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_page_cache_is_element_of(okl4_page_cache_t var);
+
+
+/*lint -esym(714, okl4_page_cache_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_page_cache_is_element_of(okl4_page_cache_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((var == OKL4_PAGE_CACHE_WRITECOMBINE) ||
+            (var == OKL4_PAGE_CACHE_DEFAULT) ||
+            (var == OKL4_PAGE_CACHE_IPC_RX) ||
+            (var == OKL4_PAGE_CACHE_IPC_TX) ||
+            (var == OKL4_PAGE_CACHE_TRACEBUFFER) ||
+            (var == OKL4_PAGE_CACHE_WRITEBACK) ||
+            (var == OKL4_PAGE_CACHE_IWB_RWA_ONC) ||
+            (var == OKL4_PAGE_CACHE_WRITETHROUGH) ||
+            (var == OKL4_PAGE_CACHE_DEVICE_GRE) ||
+            (var == OKL4_PAGE_CACHE_DEVICE_NGRE) ||
+            (var == OKL4_PAGE_CACHE_DEVICE) ||
+            (var == OKL4_PAGE_CACHE_STRONG) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_WB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_ONC_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_ONC_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_ONC_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWT_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_ONC_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_ONC_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_WT_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_ONC_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_ONC_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWT_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_ONC_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_ONC_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_DEVICE_NGNRE) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWT_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_DEVICE_GRE) ||
+            (var == OKL4_PAGE_CACHE_HW_TWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_WT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_WB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWB_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWB_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWB_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWT_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_WT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_ONC_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_ONC_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_NC_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWT_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_ONC_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_ONC_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_ONC_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_ONC_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_ONC_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWB_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_WT_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_ONC_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWB_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_ONC_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_WB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_ONC_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWB_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWT_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_ONC_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_WB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_WT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWB_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_ONC_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_ONC_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWT_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWB_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_ONC_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWT_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_ONC_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWT_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWB_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWB_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_WB_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_DEVICE_NGNRNE) ||
+            (var == OKL4_PAGE_CACHE_HW_WB_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_WB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWT_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_ONC_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_WT_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_ONC_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_ONC_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_ONC_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_WB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_ONC_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_ONC_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_ONC_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_ONC_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWT_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWT_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWT_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_WB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWB_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_ONC_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWT_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWB_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWB_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_WB_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_ONC_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_DEVICE_NGRE) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_ONC_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_WB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWB_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_ONC_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWT_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_WT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_NC_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWB_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWB_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_WT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_ONC_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_WB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_ONC_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_NC_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_WT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_ONC_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_WT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_ONC_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWT_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_WT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWT_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_ONC_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_ONC_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWT_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_WT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWB_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWB_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_MASK));
+}
+
+
+
+typedef uint32_t okl4_cell_id_t;
+
+
+
+
+
+typedef char okl4_char_t;
+
+
+
+
+
+
+
+
+/**
+    The `okl4_string_t` type represents a constant C string of type
+    'const char *'.
+*/
+
+typedef const okl4_char_t *okl4_string_t;
+
+
+
+
+
+
+
+
+
+
+
+
+/**
+
+*/
+
+struct okl4_range_item {
+    okl4_laddr_t base;
+    okl4_lsize_t size;
+};
+
+
+
+
+/**
+
+*/
+
+struct okl4_virtmem_item {
+    struct okl4_range_item range;
+};
+
+
+
+
+/**
+
+*/
+
+struct okl4_cell_management_item {
+    okl4_laddr_t entry;
+    struct okl4_virtmem_item mapping_range;
+    __ptr64(void *, data);
+    __ptr64(okl4_string_t, image);
+    okl4_kcap_t mmu;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+    __ptr64(okl4_string_t, name);
+    okl4_kcap_t registers_cap;
+    okl4_kcap_t reset_virq;
+    okl4_count_t segment_index;
+    _okl4_padding_t __padding4_4;
+    _okl4_padding_t __padding5_5;
+    _okl4_padding_t __padding6_6;
+    _okl4_padding_t __padding7_7;
+    __ptr64(struct okl4_cell_management_segments *, segments);
+    __ptr64(struct okl4_cell_management_vcpus *, vcpus);
+    okl4_bool_t boot_once;
+    okl4_bool_t can_stop;
+    okl4_bool_t deferred;
+    okl4_bool_t detached;
+    okl4_bool_t erase;
+    _okl4_padding_t __padding8_5;
+    _okl4_padding_t __padding9_6;
+    _okl4_padding_t __padding10_7;
+    okl4_laddr_t dtb_address;
+};
+
+
+
+
+/**
+
+*/
+
+struct okl4_cell_management {
+    okl4_count_t num_items;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+    struct okl4_cell_management_item items[]; /*lint --e{9038} flex array */
+};
+
+
+
+
+/**
+    The `okl4_paddr_t` type represents an unsigned integer value which is large
+    enough to contain a machine-native physical address.
+*/
+
+typedef okl4_psize_t okl4_paddr_t;
+
+
+
+
+/**
+
+*/
+
+struct okl4_segment_mapping {
+    okl4_paddr_t phys_addr;
+    okl4_psize_t size;
+    okl4_laddr_t virt_addr;
+    okl4_kcap_t cap;
+    okl4_bool_t device;
+    okl4_bool_t owned;
+    _okl4_padding_t __padding0_6;
+    _okl4_padding_t __padding1_7;
+};
+
+
+
+
+/**
+
+*/
+
+struct okl4_cell_management_segments {
+    okl4_count_t free_segments;
+    okl4_count_t num_segments;
+    struct okl4_segment_mapping segment_mappings[]; /*lint --e{9038} flex array */
+};
+
+
+
+
+/**
+
+*/
+
+struct okl4_cell_management_vcpus {
+    okl4_count_t num_vcpus;
+    okl4_kcap_t vcpu_caps[]; /*lint --e{9038} flex array */
+};
+
+
+
+
+/**
+    CPU instruction set
+*/
+
+typedef uint32_t okl4_cpu_exec_mode;
+
+/*lint -esym(621, OKL4_ARM_MODE) */
+#define OKL4_ARM_MODE ((okl4_cpu_exec_mode)(0U))
+
+/*lint -esym(621, OKL4_DEFAULT_MODE) */
+#define OKL4_DEFAULT_MODE ((okl4_cpu_exec_mode)(4U))
+
+/*lint -esym(621, OKL4_JAZELLE_MODE) */
+#define OKL4_JAZELLE_MODE ((okl4_cpu_exec_mode)(2U))
+
+/*lint -esym(621, OKL4_THUMBEE_MODE) */
+#define OKL4_THUMBEE_MODE ((okl4_cpu_exec_mode)(3U))
+
+/*lint -esym(621, OKL4_THUMB_MODE) */
+#define OKL4_THUMB_MODE ((okl4_cpu_exec_mode)(1U))
+
+
+
+/**
+    CPU mode specifier
+
+    - BITS 2..0 -   @ref OKL4_MASK_EXEC_MODE_CPU_MODE
+    - BIT 7 -   @ref OKL4_MASK_ENDIAN_CPU_MODE
+*/
+
+/*lint -esym(621, okl4_cpu_mode_t) */
+typedef uint32_t okl4_cpu_mode_t;
+
+/*lint -esym(621, okl4_cpu_mode_getexecmode) */
+/*lint -esym(714, okl4_cpu_mode_getexecmode) */
+OKL4_FORCE_INLINE okl4_cpu_exec_mode
+okl4_cpu_mode_getexecmode(const okl4_cpu_mode_t *x);
+
+/*lint -esym(621, okl4_cpu_mode_setexecmode) */
+OKL4_FORCE_INLINE void
+okl4_cpu_mode_setexecmode(okl4_cpu_mode_t *x, okl4_cpu_exec_mode _exec_mode);
+
+/*lint -esym(621, okl4_cpu_mode_getendian) */
+/*lint -esym(714, okl4_cpu_mode_getendian) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_cpu_mode_getendian(const okl4_cpu_mode_t *x);
+
+/*lint -esym(621, okl4_cpu_mode_setendian) */
+OKL4_FORCE_INLINE void
+okl4_cpu_mode_setendian(okl4_cpu_mode_t *x, okl4_bool_t _endian);
+
+/*lint -esym(714, okl4_cpu_mode_init) */
+OKL4_FORCE_INLINE void
+okl4_cpu_mode_init(okl4_cpu_mode_t *x);
+
+/*lint -esym(714, okl4_cpu_mode_cast) */
+OKL4_FORCE_INLINE okl4_cpu_mode_t
+okl4_cpu_mode_cast(uint32_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_CPU_MODE_EXEC_MODE_MASK) */
+#define OKL4_CPU_MODE_EXEC_MODE_MASK ((okl4_cpu_mode_t)7U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_EXEC_MODE_CPU_MODE) */
+#define OKL4_MASK_EXEC_MODE_CPU_MODE ((okl4_cpu_mode_t)7U)
+/*lint -esym(621, OKL4_SHIFT_EXEC_MODE_CPU_MODE) */
+#define OKL4_SHIFT_EXEC_MODE_CPU_MODE (0)
+/*lint -esym(621, OKL4_WIDTH_EXEC_MODE_CPU_MODE) */
+#define OKL4_WIDTH_EXEC_MODE_CPU_MODE (3)
+/*lint -esym(621, OKL4_CPU_MODE_ENDIAN_MASK) */
+#define OKL4_CPU_MODE_ENDIAN_MASK ((okl4_cpu_mode_t)1U << 7) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_ENDIAN_CPU_MODE) */
+#define OKL4_MASK_ENDIAN_CPU_MODE ((okl4_cpu_mode_t)1U << 7)
+/*lint -esym(621, OKL4_SHIFT_ENDIAN_CPU_MODE) */
+#define OKL4_SHIFT_ENDIAN_CPU_MODE (7)
+/*lint -esym(621, OKL4_WIDTH_ENDIAN_CPU_MODE) */
+#define OKL4_WIDTH_ENDIAN_CPU_MODE (1)
+
+
+/*lint -sem(okl4_cpu_mode_getexecmode, 1p, @n >= 0 && @n <= 7) */
+/*lint -esym(621, okl4_cpu_mode_getexecmode) */
+/*lint -esym(714, okl4_cpu_mode_getexecmode) */
+OKL4_FORCE_INLINE okl4_cpu_exec_mode
+okl4_cpu_mode_getexecmode(const okl4_cpu_mode_t *x)
+{
+    okl4_cpu_exec_mode field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 3;
+        } bits;
+        okl4_cpu_mode_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_cpu_exec_mode)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_cpu_mode_setexecmode, 2n >= 0 && 2n <= 7) */
+/*lint -esym(714, okl4_cpu_mode_setexecmode) */
+
+/*lint -esym(621, okl4_cpu_mode_setexecmode) */
+OKL4_FORCE_INLINE void
+okl4_cpu_mode_setexecmode(okl4_cpu_mode_t *x, okl4_cpu_exec_mode _exec_mode)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 3;
+        } bits;
+        okl4_cpu_mode_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_exec_mode;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_cpu_mode_getendian, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_cpu_mode_getendian) */
+/*lint -esym(714, okl4_cpu_mode_getendian) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_cpu_mode_getendian(const okl4_cpu_mode_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 7;
+            _Bool field : 1;
+        } bits;
+        okl4_cpu_mode_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_cpu_mode_setendian, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_cpu_mode_setendian) */
+
+/*lint -esym(621, okl4_cpu_mode_setendian) */
+OKL4_FORCE_INLINE void
+okl4_cpu_mode_setendian(okl4_cpu_mode_t *x, okl4_bool_t _endian)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 7;
+            _Bool field : 1;
+        } bits;
+        okl4_cpu_mode_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_endian;
+    *x = _conv.raw;
+}
+/*lint -esym(714, okl4_cpu_mode_init) */
+OKL4_FORCE_INLINE void
+okl4_cpu_mode_init(okl4_cpu_mode_t *x)
+{
+    *x = (okl4_cpu_mode_t)0U;
+}
+
+/*lint -esym(714, okl4_cpu_mode_cast) */
+OKL4_FORCE_INLINE okl4_cpu_mode_t
+okl4_cpu_mode_cast(uint32_t p, okl4_bool_t force)
+{
+    okl4_cpu_mode_t x = (okl4_cpu_mode_t)p;
+    (void)force;
+    return x;
+}
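+
+/*
+ * Illustrative sketch only (not part of the generated interface): a minimal
+ * example of composing an okl4_cpu_mode_t value with the accessors defined
+ * above.  The function name is hypothetical and the block is compiled out
+ * unless OKL4_CPU_MODE_USAGE_EXAMPLE is defined.
+ */
+#ifdef OKL4_CPU_MODE_USAGE_EXAMPLE
+static void
+okl4_cpu_mode_usage_example(void)
+{
+    okl4_cpu_mode_t mode;
+
+    okl4_cpu_mode_init(&mode);                        /* clear all fields */
+    okl4_cpu_mode_setexecmode(&mode, OKL4_ARM_MODE);  /* bits 2..0: instruction set */
+    okl4_cpu_mode_setendian(&mode, (okl4_bool_t)0);   /* bit 7: endianness flag */
+
+    if (okl4_cpu_mode_getexecmode(&mode) == OKL4_ARM_MODE) {
+        /* mode now selects the ARM instruction set */
+    }
+}
+#endif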
+
+
+
+
+struct _okl4_env_hdr {
+    uint16_t magic;
+    uint16_t count;
+};
+
+
+
+
+
+
+
+struct _okl4_env_item {
+    __ptr64(okl4_string_t, name);
+    __ptr64(void *, item);
+};
+
+
+
+
+
+
+/**
+    The OKL4 environment.  It is a dictionary that maps strings to
+    arbitrary objects.  The content of the environment is defined
+    during system construction time, and is read-only during run
+    time.
+*/
+
+struct _okl4_env {
+    struct _okl4_env_hdr env_hdr;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+    struct _okl4_env_item env_item[]; /*lint --e{9038} flex array */
+};
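+
+/*
+ * Illustrative lookup sketch only: walks the environment dictionary declared
+ * above and returns the object registered under a given name, or NULL if it
+ * is absent.  It assumes the __ptr64() members are addressable under their
+ * declared names; the SDK's own lookup helper (not shown here) should be
+ * preferred in real code.  Compiled out unless OKL4_ENV_LOOKUP_EXAMPLE is
+ * defined.
+ */
+#ifdef OKL4_ENV_LOOKUP_EXAMPLE
+#include <string.h>
+
+static void *
+okl4_env_lookup_example(const struct _okl4_env *env, okl4_string_t name)
+{
+    okl4_count_t i;
+
+    for (i = 0U; i < env->env_hdr.count; i++) {
+        if (strcmp(env->env_item[i].name, name) == 0) {
+            return env->env_item[i].item;   /* arbitrary object, caller casts */
+        }
+    }
+    return NULL;                            /* name not present */
+}
+#endif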
+
+
+
+
+/**
+
+*/
+
+struct okl4_env_access_cell {
+    __ptr64(okl4_string_t, name);
+    okl4_count_t num_entries;
+    okl4_count_t start_entry;
+};
+
+
+
+
+/**
+    The okl4_page_perms_t object represents a set of access permissions for
+    page mappings.
+
+    - @ref OKL4_PAGE_PERMS_NONE
+    - @ref OKL4_PAGE_PERMS_X
+    - @ref OKL4_PAGE_PERMS_W
+    - @ref OKL4_PAGE_PERMS_WX
+    - @ref OKL4_PAGE_PERMS_R
+    - @ref OKL4_PAGE_PERMS_RX
+    - @ref OKL4_PAGE_PERMS_RW
+    - @ref OKL4_PAGE_PERMS_RWX
+    - @ref OKL4_PAGE_PERMS_MAX
+    - @ref OKL4_PAGE_PERMS_INVALID
+*/
+
+typedef uint32_t okl4_page_perms_t;
+
+/*lint -esym(621, OKL4_PAGE_PERMS_NONE) */
+#define OKL4_PAGE_PERMS_NONE ((okl4_page_perms_t)0x0U)
+/*lint -esym(621, OKL4_PAGE_PERMS_X) */
+#define OKL4_PAGE_PERMS_X ((okl4_page_perms_t)0x1U)
+/*lint -esym(621, OKL4_PAGE_PERMS_W) */
+#define OKL4_PAGE_PERMS_W ((okl4_page_perms_t)0x2U)
+/*lint -esym(621, OKL4_PAGE_PERMS_WX) */
+#define OKL4_PAGE_PERMS_WX ((okl4_page_perms_t)0x3U)
+/*lint -esym(621, OKL4_PAGE_PERMS_R) */
+#define OKL4_PAGE_PERMS_R ((okl4_page_perms_t)0x4U)
+/*lint -esym(621, OKL4_PAGE_PERMS_RX) */
+#define OKL4_PAGE_PERMS_RX ((okl4_page_perms_t)0x5U)
+/*lint -esym(621, OKL4_PAGE_PERMS_RW) */
+#define OKL4_PAGE_PERMS_RW ((okl4_page_perms_t)0x6U)
+/*lint -esym(621, OKL4_PAGE_PERMS_RWX) */
+#define OKL4_PAGE_PERMS_RWX ((okl4_page_perms_t)0x7U)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_PAGE_PERMS_MAX) */
+#define OKL4_PAGE_PERMS_MAX ((okl4_page_perms_t)0x7U)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_PAGE_PERMS_INVALID) */
+#define OKL4_PAGE_PERMS_INVALID ((okl4_page_perms_t)0xffffffffU)
+
+/*lint -esym(714, okl4_page_perms_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_page_perms_is_element_of(okl4_page_perms_t var);
+
+
+/*lint -esym(714, okl4_page_perms_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_page_perms_is_element_of(okl4_page_perms_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((var == OKL4_PAGE_PERMS_NONE) ||
+            (var == OKL4_PAGE_PERMS_X) ||
+            (var == OKL4_PAGE_PERMS_W) ||
+            (var == OKL4_PAGE_PERMS_WX) ||
+            (var == OKL4_PAGE_PERMS_R) ||
+            (var == OKL4_PAGE_PERMS_RX) ||
+            (var == OKL4_PAGE_PERMS_RW) ||
+            (var == OKL4_PAGE_PERMS_RWX));
+}
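+
+/*
+ * Usage sketch (illustrative only, not part of the generated API): validate a
+ * permission value and test for read access.  The R/W/X values above are
+ * one-hot bits (X=1, W=2, R=4), so individual rights can be tested with a
+ * bitwise AND.
+ */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_example_perms_allow_read(okl4_page_perms_t perms)
+{
+    if (!okl4_page_perms_is_element_of(perms)) {
+        return (okl4_bool_t)0;  /* reject values outside the enumeration */
+    }
+    return (okl4_bool_t)((perms & OKL4_PAGE_PERMS_R) != 0U);
+}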
+
+
+/**
+
+*/
+
+struct okl4_env_access_entry {
+    okl4_laddr_t virtual_address;
+    okl4_psize_t offset;
+    okl4_psize_t size;
+    okl4_count_t num_segs;
+    okl4_count_t segment_index;
+    okl4_page_cache_t cache_attrs;
+    okl4_page_perms_t permissions;
+    __ptr64(okl4_string_t, object_name);
+};
+
+
+
+
+/**
+
+*/
+
+struct okl4_env_access_table {
+    okl4_count_t num_cells;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+    __ptr64(struct okl4_env_access_cell *, cells);
+    __ptr64(struct okl4_env_access_entry *, entries);
+};
+
+
+
+
+/**
+    This object contains command-line arguments passed to
+    user-level programs.
+*/
+
+struct okl4_env_args {
+    okl4_count_t argc;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+    __ptr64_array(okl4_string_t, argv)[]; /*lint --e{9038} flex array */
+};
+
+
+
+
+/**
+    The okl4_env_interrupt_device_map_t type represents a list of interrupt
+    numbers (IRQs) that are connected to a given peripheral
+    device.  Objects of this type are typically obtained from
+    the OKL4 environment.
+*/
+
+struct okl4_env_interrupt_device_map {
+    okl4_count_t num_entries;
+    okl4_interrupt_number_t entries[]; /*lint --e{9038} flex array */
+};
+
+
+
+
+/**
+    The okl4_interrupt_t structure is used to represent a kernel interrupt
+    object.
+*/
+
+struct okl4_interrupt {
+    okl4_kcap_t kcap;
+};
+
+
+
+
+/**
+    The okl4_env_interrupt_handle_t type stores the information required to
+    perform operations on an interrupt.
+*/
+
+struct okl4_env_interrupt_handle {
+    okl4_interrupt_number_t descriptor;
+    struct okl4_interrupt interrupt;
+};
+
+
+
+
+/**
+    The okl4_env_interrupt_list_t type stores a list of interrupt handle objects
+    which represent all the interrupts that are available to the cell.
+    Objects of this type are typically obtained from
+    the OKL4 environment.
+*/
+
+struct okl4_env_interrupt_list {
+    okl4_count_t num_entries;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+    __ptr64(okl4_interrupt_number_t *, descriptor);
+    __ptr64(struct okl4_interrupt *, interrupt);
+};
+
+
+
+
+/**
+
+*/
+
+struct okl4_env_profile_cell {
+    okl4_char_t name[32];
+    okl4_count_t num_cores;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+    __ptr64(struct okl4_env_profile_cpu *, core);
+};
+
+
+
+
+/**
+
+*/
+
+struct okl4_env_profile_cpu {
+    okl4_kcap_t cap;
+};
+
+
+
+
+/**
+
+*/
+
+struct okl4_env_profile_table {
+    okl4_count_t num_cell_entries;
+    okl4_count_t pcpu_cell_entry;
+    __ptr64(struct okl4_env_profile_cell *, cells);
+};
+
+
+
+
+/**
+
+*/
+
+struct okl4_env_segment {
+    okl4_paddr_t base;
+    okl4_psize_t size;
+    okl4_kcap_t cap_id;
+    okl4_page_perms_t rwx;
+};
+
+
+
+
+/**
+
+*/
+
+struct okl4_env_segment_table {
+    okl4_count_t num_segments;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+    struct okl4_env_segment segments[]; /*lint --e{9038} flex array */
+};
+
+
+
+
+/**
+    The `okl4_error_t` type represents an error condition returned by the
+    OKL4 API.
+
+    See OKL4_ERROR_*
+
+    - @ref OKL4_ERROR_KSP_OK
+    - @ref OKL4_ERROR_OK
+    - @ref OKL4_ERROR_ALREADY_STARTED
+    - @ref OKL4_ERROR_ALREADY_STOPPED
+    - @ref OKL4_ERROR_AXON_AREA_TOO_BIG
+    - @ref OKL4_ERROR_AXON_BAD_MESSAGE_SIZE
+    - @ref OKL4_ERROR_AXON_INVALID_OFFSET
+    - @ref OKL4_ERROR_AXON_QUEUE_NOT_MAPPED
+    - @ref OKL4_ERROR_AXON_QUEUE_NOT_READY
+    - @ref OKL4_ERROR_AXON_TRANSFER_LIMIT_EXCEEDED
+    - @ref OKL4_ERROR_CANCELLED
+    - @ref OKL4_ERROR_EXISTING_MAPPING
+    - @ref OKL4_ERROR_INSUFFICIENT_SEGMENT_RIGHTS
+    - @ref OKL4_ERROR_INTERRUPTED
+    - @ref OKL4_ERROR_INTERRUPT_ALREADY_ATTACHED
+    - @ref OKL4_ERROR_INTERRUPT_INVALID_IRQ
+    - @ref OKL4_ERROR_INTERRUPT_NOT_ATTACHED
+    - @ref OKL4_ERROR_INVALID_ARGUMENT
+    - @ref OKL4_ERROR_INVALID_DESIGNATOR
+    - @ref OKL4_ERROR_INVALID_POWER_STATE
+    - @ref OKL4_ERROR_INVALID_SEGMENT_INDEX
+    - @ref OKL4_ERROR_MEMORY_FAULT
+    - @ref OKL4_ERROR_MISSING_MAPPING
+    - @ref OKL4_ERROR_NON_EMPTY_MMU_CONTEXT
+    - @ref OKL4_ERROR_NOT_IN_SEGMENT
+    - @ref OKL4_ERROR_NOT_LAST_CPU
+    - @ref OKL4_ERROR_NO_RESOURCES
+    - @ref OKL4_ERROR_PIPE_BAD_STATE
+    - @ref OKL4_ERROR_PIPE_EMPTY
+    - @ref OKL4_ERROR_PIPE_FULL
+    - @ref OKL4_ERROR_PIPE_NOT_READY
+    - @ref OKL4_ERROR_PIPE_RECV_OVERFLOW
+    - @ref OKL4_ERROR_POWER_VCPU_RESUMED
+    - @ref OKL4_ERROR_SEGMENT_USED
+    - @ref OKL4_ERROR_THREAD_ALREADY_WATCHING_SUSPENDED
+    - @ref OKL4_ERROR_TIMER_ACTIVE
+    - @ref OKL4_ERROR_TIMER_CANCELLED
+    - @ref OKL4_ERROR_TRY_AGAIN
+    - @ref OKL4_ERROR_WOULD_BLOCK
+    - @ref OKL4_ERROR_ALLOC_EXHAUSTED
+    - @ref OKL4_ERROR_KSP_ERROR_0
+    - @ref OKL4_ERROR_KSP_ERROR_1
+    - @ref OKL4_ERROR_KSP_ERROR_2
+    - @ref OKL4_ERROR_KSP_ERROR_3
+    - @ref OKL4_ERROR_KSP_ERROR_4
+    - @ref OKL4_ERROR_KSP_ERROR_5
+    - @ref OKL4_ERROR_KSP_ERROR_6
+    - @ref OKL4_ERROR_KSP_ERROR_7
+    - @ref OKL4_ERROR_KSP_INVALID_ARG
+    - @ref OKL4_ERROR_KSP_NOT_IMPLEMENTED
+    - @ref OKL4_ERROR_KSP_INSUFFICIENT_RIGHTS
+    - @ref OKL4_ERROR_KSP_INTERRUPT_REGISTERED
+    - @ref OKL4_ERROR_NOT_IMPLEMENTED
+    - @ref OKL4_ERROR_MAX
+*/
+
+typedef uint32_t okl4_error_t;
+
+/**
+    KSP returned OK
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_OK) */
+#define OKL4_ERROR_KSP_OK ((okl4_error_t)0x0U)
+/**
+    The operation succeeded
+*/
+/*lint -esym(621, OKL4_ERROR_OK) */
+#define OKL4_ERROR_OK ((okl4_error_t)0x0U)
+/**
+    The target vCPU was already running.
+*/
+/*lint -esym(621, OKL4_ERROR_ALREADY_STARTED) */
+#define OKL4_ERROR_ALREADY_STARTED ((okl4_error_t)0x1U)
+/**
+    The target vCPU was not running.
+*/
+/*lint -esym(621, OKL4_ERROR_ALREADY_STOPPED) */
+#define OKL4_ERROR_ALREADY_STOPPED ((okl4_error_t)0x2U)
+/*lint -esym(621, OKL4_ERROR_AXON_AREA_TOO_BIG) */
+#define OKL4_ERROR_AXON_AREA_TOO_BIG ((okl4_error_t)0x3U)
+/*lint -esym(621, OKL4_ERROR_AXON_BAD_MESSAGE_SIZE) */
+#define OKL4_ERROR_AXON_BAD_MESSAGE_SIZE ((okl4_error_t)0x4U)
+/*lint -esym(621, OKL4_ERROR_AXON_INVALID_OFFSET) */
+#define OKL4_ERROR_AXON_INVALID_OFFSET ((okl4_error_t)0x5U)
+/*lint -esym(621, OKL4_ERROR_AXON_QUEUE_NOT_MAPPED) */
+#define OKL4_ERROR_AXON_QUEUE_NOT_MAPPED ((okl4_error_t)0x6U)
+/*lint -esym(621, OKL4_ERROR_AXON_QUEUE_NOT_READY) */
+#define OKL4_ERROR_AXON_QUEUE_NOT_READY ((okl4_error_t)0x7U)
+/*lint -esym(621, OKL4_ERROR_AXON_TRANSFER_LIMIT_EXCEEDED) */
+#define OKL4_ERROR_AXON_TRANSFER_LIMIT_EXCEEDED ((okl4_error_t)0x8U)
+/**
+    A blocking operation was cancelled due to an abort of the operation.
+*/
+/*lint -esym(621, OKL4_ERROR_CANCELLED) */
+#define OKL4_ERROR_CANCELLED ((okl4_error_t)0x9U)
+/**
+    The operation failed due to an existing mapping.  Mapping
+    operations must not overlap an existing mapping.  Unmapping
+    must be performed at the same size as the original mapping.
+*/
+/*lint -esym(621, OKL4_ERROR_EXISTING_MAPPING) */
+#define OKL4_ERROR_EXISTING_MAPPING ((okl4_error_t)0xaU)
+/**
+    The operation requested with a segment failed due to
+    insufficient rights in the segment.
+*/
+/*lint -esym(621, OKL4_ERROR_INSUFFICIENT_SEGMENT_RIGHTS) */
+#define OKL4_ERROR_INSUFFICIENT_SEGMENT_RIGHTS ((okl4_error_t)0xbU)
+/**
+    The operation did not complete because it was interrupted by a
+    preemption.  This error value is only used internally.
+*/
+/*lint -esym(621, OKL4_ERROR_INTERRUPTED) */
+#define OKL4_ERROR_INTERRUPTED ((okl4_error_t)0xcU)
+/**
+    Attempt to attach an interrupt to an IRQ number, when the
+    interrupt is already attached to an IRQ number
+*/
+/*lint -esym(621, OKL4_ERROR_INTERRUPT_ALREADY_ATTACHED) */
+#define OKL4_ERROR_INTERRUPT_ALREADY_ATTACHED ((okl4_error_t)0xdU)
+/**
+    Attempt to use an IRQ number that is out of range, of
+    the wrong type, or not in the correct state
+*/
+/*lint -esym(621, OKL4_ERROR_INTERRUPT_INVALID_IRQ) */
+#define OKL4_ERROR_INTERRUPT_INVALID_IRQ ((okl4_error_t)0xeU)
+/**
+    Attempt to operate on an unknown IRQ number
+*/
+/*lint -esym(621, OKL4_ERROR_INTERRUPT_NOT_ATTACHED) */
+#define OKL4_ERROR_INTERRUPT_NOT_ATTACHED ((okl4_error_t)0xfU)
+/**
+    An invalid argument was provided.
+*/
+/*lint -esym(621, OKL4_ERROR_INVALID_ARGUMENT) */
+#define OKL4_ERROR_INVALID_ARGUMENT ((okl4_error_t)0x10U)
+/**
+    The operation failed because one of the arguments does not refer to a
+    valid object.
+*/
+/*lint -esym(621, OKL4_ERROR_INVALID_DESIGNATOR) */
+#define OKL4_ERROR_INVALID_DESIGNATOR ((okl4_error_t)0x11U)
+/**
+    The operation failed because the power_state
+    argument is invalid.
+*/
+/*lint -esym(621, OKL4_ERROR_INVALID_POWER_STATE) */
+#define OKL4_ERROR_INVALID_POWER_STATE ((okl4_error_t)0x12U)
+/**
+    The operation failed because the given segment index does
+    not correspond to an attached physical segment.
+*/
+/*lint -esym(621, OKL4_ERROR_INVALID_SEGMENT_INDEX) */
+#define OKL4_ERROR_INVALID_SEGMENT_INDEX ((okl4_error_t)0x13U)
+/**
+    A user provided address produced a read or write fault in the operation.
+*/
+/*lint -esym(621, OKL4_ERROR_MEMORY_FAULT) */
+#define OKL4_ERROR_MEMORY_FAULT ((okl4_error_t)0x14U)
+/**
+    The operation failed because there is no mapping at the
+    specified location.
+*/
+/*lint -esym(621, OKL4_ERROR_MISSING_MAPPING) */
+#define OKL4_ERROR_MISSING_MAPPING ((okl4_error_t)0x15U)
+/**
+    The delete operation failed because the KMMU context is not
+    empty.
+*/
+/*lint -esym(621, OKL4_ERROR_NON_EMPTY_MMU_CONTEXT) */
+#define OKL4_ERROR_NON_EMPTY_MMU_CONTEXT ((okl4_error_t)0x16U)
+/**
+    The lookup operation failed because the given virtual address
+    of the given KMMU context is not mapped at the given physical
+    segment.
+*/
+/*lint -esym(621, OKL4_ERROR_NOT_IN_SEGMENT) */
+#define OKL4_ERROR_NOT_IN_SEGMENT ((okl4_error_t)0x17U)
+/**
+    The operation failed because the caller is not on the last
+    online cpu.
+*/
+/*lint -esym(621, OKL4_ERROR_NOT_LAST_CPU) */
+#define OKL4_ERROR_NOT_LAST_CPU ((okl4_error_t)0x18U)
+/**
+    Insufficient resources are available to perform the operation.
+*/
+/*lint -esym(621, OKL4_ERROR_NO_RESOURCES) */
+#define OKL4_ERROR_NO_RESOURCES ((okl4_error_t)0x19U)
+/**
+    Operation failed because the pipe was not in the required state.
+*/
+/*lint -esym(621, OKL4_ERROR_PIPE_BAD_STATE) */
+#define OKL4_ERROR_PIPE_BAD_STATE ((okl4_error_t)0x1aU)
+/**
+    Operation failed because no messages are in the queue.
+*/
+/*lint -esym(621, OKL4_ERROR_PIPE_EMPTY) */
+#define OKL4_ERROR_PIPE_EMPTY ((okl4_error_t)0x1bU)
+/**
+    Operation failed because no memory is available in the queue.
+*/
+/*lint -esym(621, OKL4_ERROR_PIPE_FULL) */
+#define OKL4_ERROR_PIPE_FULL ((okl4_error_t)0x1cU)
+/**
+    Operation failed because the pipe is in reset or not ready.
+*/
+/*lint -esym(621, OKL4_ERROR_PIPE_NOT_READY) */
+#define OKL4_ERROR_PIPE_NOT_READY ((okl4_error_t)0x1dU)
+/**
+    Message was truncated because the receive buffer size is too small.
+*/
+/*lint -esym(621, OKL4_ERROR_PIPE_RECV_OVERFLOW) */
+#define OKL4_ERROR_PIPE_RECV_OVERFLOW ((okl4_error_t)0x1eU)
+/**
+    The operation failed because at least one vCPU has a monitored
+    power state and is not currently suspended.
+*/
+/*lint -esym(621, OKL4_ERROR_POWER_VCPU_RESUMED) */
+#define OKL4_ERROR_POWER_VCPU_RESUMED ((okl4_error_t)0x1fU)
+/**
+    The operation requires a segment to be unused, or not attached
+    to an MMU context.
+*/
+/*lint -esym(621, OKL4_ERROR_SEGMENT_USED) */
+#define OKL4_ERROR_SEGMENT_USED ((okl4_error_t)0x20U)
+/*lint -esym(621, OKL4_ERROR_THREAD_ALREADY_WATCHING_SUSPENDED) */
+#define OKL4_ERROR_THREAD_ALREADY_WATCHING_SUSPENDED ((okl4_error_t)0x21U)
+/**
+    The timer is already active, and was not reprogrammed.
+*/
+/*lint -esym(621, OKL4_ERROR_TIMER_ACTIVE) */
+#define OKL4_ERROR_TIMER_ACTIVE ((okl4_error_t)0x22U)
+/**
+    The timer has already been cancelled or expired.
+*/
+/*lint -esym(621, OKL4_ERROR_TIMER_CANCELLED) */
+#define OKL4_ERROR_TIMER_CANCELLED ((okl4_error_t)0x23U)
+/**
+    Operation failed due to a temporary condition, and may be retried.
+*/
+/*lint -esym(621, OKL4_ERROR_TRY_AGAIN) */
+#define OKL4_ERROR_TRY_AGAIN ((okl4_error_t)0x24U)
+/**
+    The non-blocking operation failed because it would
+    block on a resource.
+*/
+/*lint -esym(621, OKL4_ERROR_WOULD_BLOCK) */
+#define OKL4_ERROR_WOULD_BLOCK ((okl4_error_t)0x25U)
+/**
+    Insufficient resources
+*/
+/*lint -esym(621, OKL4_ERROR_ALLOC_EXHAUSTED) */
+#define OKL4_ERROR_ALLOC_EXHAUSTED ((okl4_error_t)0x26U)
+/**
+    KSP specific error 0
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_ERROR_0) */
+#define OKL4_ERROR_KSP_ERROR_0 ((okl4_error_t)0x10000010U)
+/**
+    KSP specific error 1
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_ERROR_1) */
+#define OKL4_ERROR_KSP_ERROR_1 ((okl4_error_t)0x10000011U)
+/**
+    KSP specific error 2
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_ERROR_2) */
+#define OKL4_ERROR_KSP_ERROR_2 ((okl4_error_t)0x10000012U)
+/**
+    KSP specific error 3
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_ERROR_3) */
+#define OKL4_ERROR_KSP_ERROR_3 ((okl4_error_t)0x10000013U)
+/**
+    KSP specific error 4
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_ERROR_4) */
+#define OKL4_ERROR_KSP_ERROR_4 ((okl4_error_t)0x10000014U)
+/**
+    KSP specific error 5
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_ERROR_5) */
+#define OKL4_ERROR_KSP_ERROR_5 ((okl4_error_t)0x10000015U)
+/**
+    KSP specific error 6
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_ERROR_6) */
+#define OKL4_ERROR_KSP_ERROR_6 ((okl4_error_t)0x10000016U)
+/**
+    KSP specific error 7
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_ERROR_7) */
+#define OKL4_ERROR_KSP_ERROR_7 ((okl4_error_t)0x10000017U)
+/**
+    Invalid argument to KSP
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_INVALID_ARG) */
+#define OKL4_ERROR_KSP_INVALID_ARG ((okl4_error_t)0x80000001U)
+/**
+    KSP doesn't implement requested feature
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_NOT_IMPLEMENTED) */
+#define OKL4_ERROR_KSP_NOT_IMPLEMENTED ((okl4_error_t)0x80000002U)
+/**
+    User didn't supply rights for requested feature
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_INSUFFICIENT_RIGHTS) */
+#define OKL4_ERROR_KSP_INSUFFICIENT_RIGHTS ((okl4_error_t)0x80000003U)
+/**
+    Interrupt already registered
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_INTERRUPT_REGISTERED) */
+#define OKL4_ERROR_KSP_INTERRUPT_REGISTERED ((okl4_error_t)0x80000004U)
+/**
+    Requested operation is not implemented.
+*/
+/*lint -esym(621, OKL4_ERROR_NOT_IMPLEMENTED) */
+#define OKL4_ERROR_NOT_IMPLEMENTED ((okl4_error_t)0xffffffffU)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ERROR_MAX) */
+#define OKL4_ERROR_MAX ((okl4_error_t)0xffffffffU)
+
+/*lint -esym(714, okl4_error_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_error_is_element_of(okl4_error_t var);
+
+
+/*lint -esym(714, okl4_error_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_error_is_element_of(okl4_error_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((var == OKL4_ERROR_ALREADY_STARTED) ||
+            (var == OKL4_ERROR_ALREADY_STOPPED) ||
+            (var == OKL4_ERROR_AXON_AREA_TOO_BIG) ||
+            (var == OKL4_ERROR_AXON_BAD_MESSAGE_SIZE) ||
+            (var == OKL4_ERROR_AXON_INVALID_OFFSET) ||
+            (var == OKL4_ERROR_AXON_QUEUE_NOT_MAPPED) ||
+            (var == OKL4_ERROR_AXON_QUEUE_NOT_READY) ||
+            (var == OKL4_ERROR_AXON_TRANSFER_LIMIT_EXCEEDED) ||
+            (var == OKL4_ERROR_CANCELLED) ||
+            (var == OKL4_ERROR_EXISTING_MAPPING) ||
+            (var == OKL4_ERROR_INSUFFICIENT_SEGMENT_RIGHTS) ||
+            (var == OKL4_ERROR_INTERRUPTED) ||
+            (var == OKL4_ERROR_INTERRUPT_ALREADY_ATTACHED) ||
+            (var == OKL4_ERROR_INTERRUPT_INVALID_IRQ) ||
+            (var == OKL4_ERROR_INTERRUPT_NOT_ATTACHED) ||
+            (var == OKL4_ERROR_INVALID_ARGUMENT) ||
+            (var == OKL4_ERROR_INVALID_DESIGNATOR) ||
+            (var == OKL4_ERROR_INVALID_POWER_STATE) ||
+            (var == OKL4_ERROR_INVALID_SEGMENT_INDEX) ||
+            (var == OKL4_ERROR_KSP_ERROR_0) ||
+            (var == OKL4_ERROR_KSP_ERROR_1) ||
+            (var == OKL4_ERROR_KSP_ERROR_2) ||
+            (var == OKL4_ERROR_KSP_ERROR_3) ||
+            (var == OKL4_ERROR_KSP_ERROR_4) ||
+            (var == OKL4_ERROR_KSP_ERROR_5) ||
+            (var == OKL4_ERROR_KSP_ERROR_6) ||
+            (var == OKL4_ERROR_KSP_ERROR_7) ||
+            (var == OKL4_ERROR_KSP_INSUFFICIENT_RIGHTS) ||
+            (var == OKL4_ERROR_KSP_INTERRUPT_REGISTERED) ||
+            (var == OKL4_ERROR_KSP_INVALID_ARG) ||
+            (var == OKL4_ERROR_KSP_NOT_IMPLEMENTED) ||
+            (var == OKL4_ERROR_KSP_OK) ||
+            (var == OKL4_ERROR_MEMORY_FAULT) ||
+            (var == OKL4_ERROR_MISSING_MAPPING) ||
+            (var == OKL4_ERROR_NON_EMPTY_MMU_CONTEXT) ||
+            (var == OKL4_ERROR_NOT_IMPLEMENTED) ||
+            (var == OKL4_ERROR_NOT_IN_SEGMENT) ||
+            (var == OKL4_ERROR_NOT_LAST_CPU) ||
+            (var == OKL4_ERROR_NO_RESOURCES) ||
+            (var == OKL4_ERROR_OK) ||
+            (var == OKL4_ERROR_PIPE_BAD_STATE) ||
+            (var == OKL4_ERROR_PIPE_EMPTY) ||
+            (var == OKL4_ERROR_PIPE_FULL) ||
+            (var == OKL4_ERROR_PIPE_NOT_READY) ||
+            (var == OKL4_ERROR_PIPE_RECV_OVERFLOW) ||
+            (var == OKL4_ERROR_POWER_VCPU_RESUMED) ||
+            (var == OKL4_ERROR_SEGMENT_USED) ||
+            (var == OKL4_ERROR_THREAD_ALREADY_WATCHING_SUSPENDED) ||
+            (var == OKL4_ERROR_TIMER_ACTIVE) ||
+            (var == OKL4_ERROR_TIMER_CANCELLED) ||
+            (var == OKL4_ERROR_TRY_AGAIN) ||
+            (var == OKL4_ERROR_WOULD_BLOCK) ||
+            (var == OKL4_ERROR_ALLOC_EXHAUSTED));
+}
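+
+/*
+ * Usage sketch (illustrative only, not part of the generated API): classify
+ * an error as retryable.  Which errors are worth retrying is a policy choice;
+ * this sketch simply treats the two transient conditions documented above as
+ * retryable.
+ */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_example_error_is_retryable(okl4_error_t err)
+{
+    return (okl4_bool_t)((err == OKL4_ERROR_TRY_AGAIN) ||
+                         (err == OKL4_ERROR_WOULD_BLOCK));
+}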
+
+
+/**
+
+*/
+
+struct okl4_firmware_segment {
+    okl4_laddr_t copy_addr;
+    okl4_laddr_t exec_addr;
+    okl4_lsize_t filesz;
+    okl4_lsize_t memsz_diff;
+};
+
+
+
+
+/**
+
+*/
+
+struct okl4_firmware_segments_info {
+    okl4_count_t num_segments;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+    struct okl4_firmware_segment segments[]; /*lint --e{9038} flex array */
+};
+
+
+
+
+/**
+    - BIT 1 -   @ref OKL4_MASK_EDGE_GICD_ICFGR
+*/
+
+/*lint -esym(621, okl4_gicd_icfgr_t) */
+typedef uint32_t okl4_gicd_icfgr_t;
+
+/*lint -esym(621, okl4_gicd_icfgr_getedge) */
+/*lint -esym(714, okl4_gicd_icfgr_getedge) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_gicd_icfgr_getedge(const okl4_gicd_icfgr_t *x);
+
+/*lint -esym(621, okl4_gicd_icfgr_setedge) */
+OKL4_FORCE_INLINE void
+okl4_gicd_icfgr_setedge(okl4_gicd_icfgr_t *x, okl4_bool_t _edge);
+
+/*lint -esym(714, okl4_gicd_icfgr_init) */
+OKL4_FORCE_INLINE void
+okl4_gicd_icfgr_init(okl4_gicd_icfgr_t *x);
+
+/*lint -esym(714, okl4_gicd_icfgr_cast) */
+OKL4_FORCE_INLINE okl4_gicd_icfgr_t
+okl4_gicd_icfgr_cast(uint32_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_GICD_ICFGR_EDGE_MASK) */
+#define OKL4_GICD_ICFGR_EDGE_MASK ((okl4_gicd_icfgr_t)1U << 1) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_EDGE_GICD_ICFGR) */
+#define OKL4_MASK_EDGE_GICD_ICFGR ((okl4_gicd_icfgr_t)1U << 1)
+/*lint -esym(621, OKL4_SHIFT_EDGE_GICD_ICFGR) */
+#define OKL4_SHIFT_EDGE_GICD_ICFGR (1)
+/*lint -esym(621, OKL4_WIDTH_EDGE_GICD_ICFGR) */
+#define OKL4_WIDTH_EDGE_GICD_ICFGR (1)
+
+
+/*lint -sem(okl4_gicd_icfgr_getedge, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_gicd_icfgr_getedge) */
+/*lint -esym(714, okl4_gicd_icfgr_getedge) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_gicd_icfgr_getedge(const okl4_gicd_icfgr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 1;
+            _Bool field : 1;
+        } bits;
+        okl4_gicd_icfgr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_gicd_icfgr_setedge, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_gicd_icfgr_setedge) */
+
+/*lint -esym(621, okl4_gicd_icfgr_setedge) */
+OKL4_FORCE_INLINE void
+okl4_gicd_icfgr_setedge(okl4_gicd_icfgr_t *x, okl4_bool_t _edge)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 1;
+            _Bool field : 1;
+        } bits;
+        okl4_gicd_icfgr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_edge;
+    *x = _conv.raw;
+}
+/*lint -esym(714, okl4_gicd_icfgr_init) */
+OKL4_FORCE_INLINE void
+okl4_gicd_icfgr_init(okl4_gicd_icfgr_t *x)
+{
+    *x = (okl4_gicd_icfgr_t)0U;
+}
+
+/*lint -esym(714, okl4_gicd_icfgr_cast) */
+OKL4_FORCE_INLINE okl4_gicd_icfgr_t
+okl4_gicd_icfgr_cast(uint32_t p, okl4_bool_t force)
+{
+    okl4_gicd_icfgr_t x = (okl4_gicd_icfgr_t)p;
+    (void)force;
+    return x;
+}
+
+
+
+
+typedef uint32_t okl4_sgi_target_t;
+
+/*lint -esym(621, OKL4_SGI_TARGET_LISTED) */
+#define OKL4_SGI_TARGET_LISTED ((okl4_sgi_target_t)0x0U)
+/*lint -esym(621, OKL4_SGI_TARGET_ALL_OTHERS) */
+#define OKL4_SGI_TARGET_ALL_OTHERS ((okl4_sgi_target_t)0x1U)
+/*lint -esym(621, OKL4_SGI_TARGET_SELF) */
+#define OKL4_SGI_TARGET_SELF ((okl4_sgi_target_t)0x2U)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_SGI_TARGET_MAX) */
+#define OKL4_SGI_TARGET_MAX ((okl4_sgi_target_t)0x2U)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_SGI_TARGET_INVALID) */
+#define OKL4_SGI_TARGET_INVALID ((okl4_sgi_target_t)0xffffffffU)
+
+/*lint -esym(714, okl4_sgi_target_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_sgi_target_is_element_of(okl4_sgi_target_t var);
+
+
+/*lint -esym(714, okl4_sgi_target_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_sgi_target_is_element_of(okl4_sgi_target_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((var == OKL4_SGI_TARGET_LISTED) ||
+            (var == OKL4_SGI_TARGET_ALL_OTHERS) ||
+            (var == OKL4_SGI_TARGET_SELF));
+}
+
+
+/**
+    - BITS 3..0 -   @ref OKL4_MASK_SGIINTID_GICD_SGIR
+    - BIT 15 -   @ref OKL4_MASK_NSATT_GICD_SGIR
+    - BITS 23..16 -   @ref OKL4_MASK_CPUTARGETLIST_GICD_SGIR
+    - BITS 25..24 -   @ref OKL4_MASK_TARGETLISTFILTER_GICD_SGIR
+*/
+
+/*lint -esym(621, okl4_gicd_sgir_t) */
+typedef uint32_t okl4_gicd_sgir_t;
+
+/*lint -esym(621, okl4_gicd_sgir_getsgiintid) */
+/*lint -esym(714, okl4_gicd_sgir_getsgiintid) */
+OKL4_FORCE_INLINE okl4_interrupt_number_t
+okl4_gicd_sgir_getsgiintid(const okl4_gicd_sgir_t *x);
+
+/*lint -esym(621, okl4_gicd_sgir_setsgiintid) */
+OKL4_FORCE_INLINE void
+okl4_gicd_sgir_setsgiintid(okl4_gicd_sgir_t *x, okl4_interrupt_number_t _sgiintid);
+
+/*lint -esym(621, okl4_gicd_sgir_getnsatt) */
+/*lint -esym(714, okl4_gicd_sgir_getnsatt) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_gicd_sgir_getnsatt(const okl4_gicd_sgir_t *x);
+
+/*lint -esym(621, okl4_gicd_sgir_setnsatt) */
+OKL4_FORCE_INLINE void
+okl4_gicd_sgir_setnsatt(okl4_gicd_sgir_t *x, okl4_bool_t _nsatt);
+
+/*lint -esym(621, okl4_gicd_sgir_getcputargetlist) */
+/*lint -esym(714, okl4_gicd_sgir_getcputargetlist) */
+OKL4_FORCE_INLINE uint8_t
+okl4_gicd_sgir_getcputargetlist(const okl4_gicd_sgir_t *x);
+
+/*lint -esym(621, okl4_gicd_sgir_setcputargetlist) */
+OKL4_FORCE_INLINE void
+okl4_gicd_sgir_setcputargetlist(okl4_gicd_sgir_t *x, uint8_t _cputargetlist);
+
+/*lint -esym(621, okl4_gicd_sgir_gettargetlistfilter) */
+/*lint -esym(714, okl4_gicd_sgir_gettargetlistfilter) */
+OKL4_FORCE_INLINE okl4_sgi_target_t
+okl4_gicd_sgir_gettargetlistfilter(const okl4_gicd_sgir_t *x);
+
+/*lint -esym(621, okl4_gicd_sgir_settargetlistfilter) */
+OKL4_FORCE_INLINE void
+okl4_gicd_sgir_settargetlistfilter(okl4_gicd_sgir_t *x, okl4_sgi_target_t _targetlistfilter);
+
+/*lint -esym(714, okl4_gicd_sgir_init) */
+OKL4_FORCE_INLINE void
+okl4_gicd_sgir_init(okl4_gicd_sgir_t *x);
+
+/*lint -esym(714, okl4_gicd_sgir_cast) */
+OKL4_FORCE_INLINE okl4_gicd_sgir_t
+okl4_gicd_sgir_cast(uint32_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_GICD_SGIR_SGIINTID_MASK) */
+#define OKL4_GICD_SGIR_SGIINTID_MASK ((okl4_gicd_sgir_t)15U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_SGIINTID_GICD_SGIR) */
+#define OKL4_MASK_SGIINTID_GICD_SGIR ((okl4_gicd_sgir_t)15U)
+/*lint -esym(621, OKL4_SHIFT_SGIINTID_GICD_SGIR) */
+#define OKL4_SHIFT_SGIINTID_GICD_SGIR (0)
+/*lint -esym(621, OKL4_WIDTH_SGIINTID_GICD_SGIR) */
+#define OKL4_WIDTH_SGIINTID_GICD_SGIR (4)
+/*lint -esym(621, OKL4_GICD_SGIR_NSATT_MASK) */
+#define OKL4_GICD_SGIR_NSATT_MASK ((okl4_gicd_sgir_t)1U << 15) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_NSATT_GICD_SGIR) */
+#define OKL4_MASK_NSATT_GICD_SGIR ((okl4_gicd_sgir_t)1U << 15)
+/*lint -esym(621, OKL4_SHIFT_NSATT_GICD_SGIR) */
+#define OKL4_SHIFT_NSATT_GICD_SGIR (15)
+/*lint -esym(621, OKL4_WIDTH_NSATT_GICD_SGIR) */
+#define OKL4_WIDTH_NSATT_GICD_SGIR (1)
+/*lint -esym(621, OKL4_GICD_SGIR_CPUTARGETLIST_MASK) */
+#define OKL4_GICD_SGIR_CPUTARGETLIST_MASK ((okl4_gicd_sgir_t)255U << 16) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_CPUTARGETLIST_GICD_SGIR) */
+#define OKL4_MASK_CPUTARGETLIST_GICD_SGIR ((okl4_gicd_sgir_t)255U << 16)
+/*lint -esym(621, OKL4_SHIFT_CPUTARGETLIST_GICD_SGIR) */
+#define OKL4_SHIFT_CPUTARGETLIST_GICD_SGIR (16)
+/*lint -esym(621, OKL4_WIDTH_CPUTARGETLIST_GICD_SGIR) */
+#define OKL4_WIDTH_CPUTARGETLIST_GICD_SGIR (8)
+/*lint -esym(621, OKL4_GICD_SGIR_TARGETLISTFILTER_MASK) */
+#define OKL4_GICD_SGIR_TARGETLISTFILTER_MASK ((okl4_gicd_sgir_t)3U << 24) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_TARGETLISTFILTER_GICD_SGIR) */
+#define OKL4_MASK_TARGETLISTFILTER_GICD_SGIR ((okl4_gicd_sgir_t)3U << 24)
+/*lint -esym(621, OKL4_SHIFT_TARGETLISTFILTER_GICD_SGIR) */
+#define OKL4_SHIFT_TARGETLISTFILTER_GICD_SGIR (24)
+/*lint -esym(621, OKL4_WIDTH_TARGETLISTFILTER_GICD_SGIR) */
+#define OKL4_WIDTH_TARGETLISTFILTER_GICD_SGIR (2)
+
+
+/*lint -sem(okl4_gicd_sgir_getsgiintid, 1p, @n >= 0 && @n <= 15) */
+/*lint -esym(621, okl4_gicd_sgir_getsgiintid) */
+/*lint -esym(714, okl4_gicd_sgir_getsgiintid) */
+OKL4_FORCE_INLINE okl4_interrupt_number_t
+okl4_gicd_sgir_getsgiintid(const okl4_gicd_sgir_t *x)
+{
+    okl4_interrupt_number_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 4;
+        } bits;
+        okl4_gicd_sgir_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_interrupt_number_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_gicd_sgir_setsgiintid, 2n >= 0 && 2n <= 15) */
+/*lint -esym(714, okl4_gicd_sgir_setsgiintid) */
+
+/*lint -esym(621, okl4_gicd_sgir_setsgiintid) */
+OKL4_FORCE_INLINE void
+okl4_gicd_sgir_setsgiintid(okl4_gicd_sgir_t *x, okl4_interrupt_number_t _sgiintid)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 4;
+        } bits;
+        okl4_gicd_sgir_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_sgiintid;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_gicd_sgir_getnsatt, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_gicd_sgir_getnsatt) */
+/*lint -esym(714, okl4_gicd_sgir_getnsatt) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_gicd_sgir_getnsatt(const okl4_gicd_sgir_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 15;
+            _Bool field : 1;
+        } bits;
+        okl4_gicd_sgir_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_gicd_sgir_setnsatt, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_gicd_sgir_setnsatt) */
+
+/*lint -esym(621, okl4_gicd_sgir_setnsatt) */
+OKL4_FORCE_INLINE void
+okl4_gicd_sgir_setnsatt(okl4_gicd_sgir_t *x, okl4_bool_t _nsatt)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 15;
+            _Bool field : 1;
+        } bits;
+        okl4_gicd_sgir_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_nsatt;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_gicd_sgir_getcputargetlist, 1p, @n >= 0 && @n <= 255) */
+/*lint -esym(621, okl4_gicd_sgir_getcputargetlist) */
+/*lint -esym(714, okl4_gicd_sgir_getcputargetlist) */
+OKL4_FORCE_INLINE uint8_t
+okl4_gicd_sgir_getcputargetlist(const okl4_gicd_sgir_t *x)
+{
+    uint8_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 16;
+            uint32_t field : 8;
+        } bits;
+        okl4_gicd_sgir_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint8_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_gicd_sgir_setcputargetlist, 2n >= 0 && 2n <= 255) */
+/*lint -esym(714, okl4_gicd_sgir_setcputargetlist) */
+
+/*lint -esym(621, okl4_gicd_sgir_setcputargetlist) */
+OKL4_FORCE_INLINE void
+okl4_gicd_sgir_setcputargetlist(okl4_gicd_sgir_t *x, uint8_t _cputargetlist)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 16;
+            uint32_t field : 8;
+        } bits;
+        okl4_gicd_sgir_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_cputargetlist;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_gicd_sgir_gettargetlistfilter, 1p, @n >= 0 && @n <= 3) */
+/*lint -esym(621, okl4_gicd_sgir_gettargetlistfilter) */
+/*lint -esym(714, okl4_gicd_sgir_gettargetlistfilter) */
+OKL4_FORCE_INLINE okl4_sgi_target_t
+okl4_gicd_sgir_gettargetlistfilter(const okl4_gicd_sgir_t *x)
+{
+    okl4_sgi_target_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 24;
+            uint32_t field : 2;
+        } bits;
+        okl4_gicd_sgir_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_sgi_target_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_gicd_sgir_settargetlistfilter, 2n >= 0 && 2n <= 3) */
+/*lint -esym(714, okl4_gicd_sgir_settargetlistfilter) */
+
+/*lint -esym(621, okl4_gicd_sgir_settargetlistfilter) */
+OKL4_FORCE_INLINE void
+okl4_gicd_sgir_settargetlistfilter(okl4_gicd_sgir_t *x, okl4_sgi_target_t _targetlistfilter)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 24;
+            uint32_t field : 2;
+        } bits;
+        okl4_gicd_sgir_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_targetlistfilter;
+    *x = _conv.raw;
+}
+/*lint -esym(714, okl4_gicd_sgir_init) */
+OKL4_FORCE_INLINE void
+okl4_gicd_sgir_init(okl4_gicd_sgir_t *x)
+{
+    *x = (okl4_gicd_sgir_t)32768U;
+}
+
+/*lint -esym(714, okl4_gicd_sgir_cast) */
+OKL4_FORCE_INLINE okl4_gicd_sgir_t
+okl4_gicd_sgir_cast(uint32_t p, okl4_bool_t force)
+{
+    okl4_gicd_sgir_t x = (okl4_gicd_sgir_t)p;
+    (void)force;
+    return x;
+}
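+
+/*
+ * Usage sketch (illustrative only, not part of the generated API): compose a
+ * GICD_SGIR value for SGI number `intid` aimed at the CPUs named in
+ * `targets`, using the accessors above.  OKL4_SGI_TARGET_LISTED selects the
+ * explicit CPU target list; actually raising the SGI (writing the value to
+ * the distributor) is outside this sketch.
+ */
+OKL4_FORCE_INLINE okl4_gicd_sgir_t
+okl4_example_make_sgir(okl4_interrupt_number_t intid, uint8_t targets)
+{
+    okl4_gicd_sgir_t sgir;
+
+    okl4_gicd_sgir_init(&sgir);
+    okl4_gicd_sgir_setsgiintid(&sgir, intid & 15U);  /* SGIs are 0..15 */
+    okl4_gicd_sgir_setcputargetlist(&sgir, targets);
+    okl4_gicd_sgir_settargetlistfilter(&sgir, OKL4_SGI_TARGET_LISTED);
+    return sgir;
+}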
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+/**
+    The okl4_kmmu_t structure is used to represent a kernel MMU
+    context.
+*/
+
+struct okl4_kmmu {
+    okl4_kcap_t kcap;
+};
+
+
+
+
+/**
+    The `okl4_ksp_arg_t` type represents an unsigned, machine-native
+    register-sized integer value used for KSP call arguments. Important: it is
+    truncated to guest register-size when guest register-size is smaller than
+    kernel register-size.
+*/
+
+typedef okl4_register_t okl4_ksp_arg_t;
+
+
+
+
+/**
+
+*/
+
+struct okl4_ksp_user_agent {
+    okl4_kcap_t kcap;
+    okl4_interrupt_number_t virq;
+};
+
+
+
+
+
+typedef uint32_t okl4_ksp_vdevice_class_t;
+
+
+
+
+
+typedef okl4_register_t okl4_laddr_pn_t;
+
+
+
+
+
+typedef okl4_register_t okl4_laddr_tr_t;
+
+
+
+
+/**
+
+*/
+
+struct okl4_pipe_data {
+    okl4_kcap_t kcap;
+    okl4_irq_t virq;
+};
+
+
+
+
+/**
+
+*/
+
+struct okl4_pipe_ep_data {
+    struct okl4_pipe_data rx;
+    struct okl4_pipe_data tx;
+};
+
+
+
+
+
+typedef uint32_t okl4_link_role_t;
+
+/*lint -esym(621, OKL4_LINK_ROLE_SYMMETRIC) */
+#define OKL4_LINK_ROLE_SYMMETRIC ((okl4_link_role_t)0x0U)
+/*lint -esym(621, OKL4_LINK_ROLE_SERVER) */
+#define OKL4_LINK_ROLE_SERVER ((okl4_link_role_t)0x1U)
+/*lint -esym(621, OKL4_LINK_ROLE_CLIENT) */
+#define OKL4_LINK_ROLE_CLIENT ((okl4_link_role_t)0x2U)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_LINK_ROLE_MAX) */
+#define OKL4_LINK_ROLE_MAX ((okl4_link_role_t)0x2U)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_LINK_ROLE_INVALID) */
+#define OKL4_LINK_ROLE_INVALID ((okl4_link_role_t)0xffffffffU)
+
+/*lint -esym(714, okl4_link_role_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_link_role_is_element_of(okl4_link_role_t var);
+
+
+/*lint -esym(714, okl4_link_role_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_link_role_is_element_of(okl4_link_role_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((var == OKL4_LINK_ROLE_SYMMETRIC) ||
+            (var == OKL4_LINK_ROLE_SERVER) ||
+            (var == OKL4_LINK_ROLE_CLIENT));
+}
+
+
+
+typedef uint32_t okl4_link_transport_type_t;
+
+/*lint -esym(621, OKL4_LINK_TRANSPORT_TYPE_SHARED_BUFFER) */
+#define OKL4_LINK_TRANSPORT_TYPE_SHARED_BUFFER ((okl4_link_transport_type_t)0x0U)
+/*lint -esym(621, OKL4_LINK_TRANSPORT_TYPE_AXONS) */
+#define OKL4_LINK_TRANSPORT_TYPE_AXONS ((okl4_link_transport_type_t)0x1U)
+/*lint -esym(621, OKL4_LINK_TRANSPORT_TYPE_PIPES) */
+#define OKL4_LINK_TRANSPORT_TYPE_PIPES ((okl4_link_transport_type_t)0x2U)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_LINK_TRANSPORT_TYPE_MAX) */
+#define OKL4_LINK_TRANSPORT_TYPE_MAX ((okl4_link_transport_type_t)0x2U)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_LINK_TRANSPORT_TYPE_INVALID) */
+#define OKL4_LINK_TRANSPORT_TYPE_INVALID ((okl4_link_transport_type_t)0xffffffffU)
+
+/*lint -esym(714, okl4_link_transport_type_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_link_transport_type_is_element_of(okl4_link_transport_type_t var);
+
+
+/*lint -esym(714, okl4_link_transport_type_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_link_transport_type_is_element_of(okl4_link_transport_type_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((var == OKL4_LINK_TRANSPORT_TYPE_SHARED_BUFFER) ||
+            (var == OKL4_LINK_TRANSPORT_TYPE_AXONS) ||
+            (var == OKL4_LINK_TRANSPORT_TYPE_PIPES));
+}
+
+
+/**
+
+*/
+
+struct okl4_link {
+    __ptr64(okl4_string_t, name);
+    __ptr64(void *, opaque);
+    __ptr64(okl4_string_t, partner_name);
+    okl4_link_role_t role;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+    union {
+        struct {
+            struct okl4_virtmem_item buffer;
+            okl4_irq_t virq_in;
+            okl4_kcap_t virq_out;
+        } shared_buffer;
+
+        struct {
+            struct okl4_axon_ep_data axon_ep;
+            okl4_ksize_t message_size;
+            okl4_count_t queue_length;
+            _okl4_padding_t __padding0_4; /**< Padding 8 */
+            _okl4_padding_t __padding1_5; /**< Padding 8 */
+            _okl4_padding_t __padding2_6; /**< Padding 8 */
+            _okl4_padding_t __padding3_7; /**< Padding 8 */
+        } axons;
+
+        struct {
+            okl4_ksize_t message_size;
+            struct okl4_pipe_ep_data pipe_ep;
+            okl4_count_t queue_length;
+            _okl4_padding_t __padding0_4; /**< Padding 8 */
+            _okl4_padding_t __padding1_5; /**< Padding 8 */
+            _okl4_padding_t __padding2_6; /**< Padding 8 */
+            _okl4_padding_t __padding3_7; /**< Padding 8 */
+        } pipes;
+
+    } transport;
+
+    okl4_link_transport_type_t transport_type;
+    _okl4_padding_t __padding4_4;
+    _okl4_padding_t __padding5_5;
+    _okl4_padding_t __padding6_6;
+    _okl4_padding_t __padding7_7;
+};
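+
+/*
+ * Usage sketch (illustrative only, not part of the generated API): the
+ * transport union must be interpreted according to transport_type, e.g. to
+ * find a link's per-message size limit.  Zero is returned for the
+ * shared-buffer transport, which carries no message_size field.
+ */
+OKL4_FORCE_INLINE okl4_ksize_t
+okl4_example_link_message_size(const struct okl4_link *link)
+{
+    switch (link->transport_type) {
+    case OKL4_LINK_TRANSPORT_TYPE_AXONS:
+        return link->transport.axons.message_size;
+    case OKL4_LINK_TRANSPORT_TYPE_PIPES:
+        return link->transport.pipes.message_size;
+    default:
+        return (okl4_ksize_t)0;
+    }
+}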
+
+
+
+
+/**
+
+*/
+
+struct okl4_links {
+    okl4_count_t num_links;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+    __ptr64_array(struct okl4_link *, links)[]; /*lint --e{9038} flex array */
+};
+
+
+
+
+
+typedef okl4_register_t okl4_lsize_pn_t;
+
+
+
+
+
+typedef okl4_register_t okl4_lsize_tr_t;
+
+
+
+
+/**
+    The okl4_machine_info_t structure holds machine-specific
+    constants that are only known at weave-time. Objects of this
+    type are typically obtained from the OKL4 environment.
+*/
+
+struct okl4_machine_info {
+    okl4_ksize_t l1_cache_line_size;
+    okl4_ksize_t l2_cache_line_size;
+    okl4_count_t num_cpus;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+};
+
+
+
+
+/**
+
+*/
+
+struct okl4_merged_physpool {
+    okl4_paddr_t phys_addr;
+    okl4_count_t num_segments;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+    struct okl4_virtmem_item segments[]; /*lint --e{9038} flex array */
+};
+
+
+
+
+
+typedef uint32_t okl4_microseconds_t;
+
+
+
+
+/**
+
+*/
+
+struct okl4_microvisor_timer {
+    okl4_kcap_t kcap;
+    okl4_irq_t virq;
+};
+
+
+
+
+/**
+    - BITS 15..0 -   @ref OKL4_MASK_ERROR_MMU_LOOKUP_INDEX
+    - BITS 31..16 -   @ref OKL4_MASK_INDEX_MMU_LOOKUP_INDEX
+*/
+
+/*lint -esym(621, okl4_mmu_lookup_index_t) */
+typedef uint32_t okl4_mmu_lookup_index_t;
+
+/*lint -esym(621, okl4_mmu_lookup_index_geterror) */
+/*lint -esym(714, okl4_mmu_lookup_index_geterror) */
+OKL4_FORCE_INLINE okl4_error_t
+okl4_mmu_lookup_index_geterror(const okl4_mmu_lookup_index_t *x);
+
+/*lint -esym(621, okl4_mmu_lookup_index_seterror) */
+OKL4_FORCE_INLINE void
+okl4_mmu_lookup_index_seterror(okl4_mmu_lookup_index_t *x, okl4_error_t _error);
+
+/*lint -esym(621, okl4_mmu_lookup_index_getindex) */
+/*lint -esym(714, okl4_mmu_lookup_index_getindex) */
+OKL4_FORCE_INLINE okl4_count_t
+okl4_mmu_lookup_index_getindex(const okl4_mmu_lookup_index_t *x);
+
+/*lint -esym(621, okl4_mmu_lookup_index_setindex) */
+OKL4_FORCE_INLINE void
+okl4_mmu_lookup_index_setindex(okl4_mmu_lookup_index_t *x, okl4_count_t _index);
+
+/*lint -esym(714, okl4_mmu_lookup_index_init) */
+OKL4_FORCE_INLINE void
+okl4_mmu_lookup_index_init(okl4_mmu_lookup_index_t *x);
+
+/*lint -esym(714, okl4_mmu_lookup_index_cast) */
+OKL4_FORCE_INLINE okl4_mmu_lookup_index_t
+okl4_mmu_lookup_index_cast(uint32_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_MMU_LOOKUP_INDEX_ERROR_MASK) */
+#define OKL4_MMU_LOOKUP_INDEX_ERROR_MASK ((okl4_mmu_lookup_index_t)65535U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_ERROR_MMU_LOOKUP_INDEX) */
+#define OKL4_MASK_ERROR_MMU_LOOKUP_INDEX ((okl4_mmu_lookup_index_t)65535U)
+/*lint -esym(621, OKL4_SHIFT_ERROR_MMU_LOOKUP_INDEX) */
+#define OKL4_SHIFT_ERROR_MMU_LOOKUP_INDEX (0)
+/*lint -esym(621, OKL4_WIDTH_ERROR_MMU_LOOKUP_INDEX) */
+#define OKL4_WIDTH_ERROR_MMU_LOOKUP_INDEX (16)
+/*lint -esym(621, OKL4_MMU_LOOKUP_INDEX_INDEX_MASK) */
+#define OKL4_MMU_LOOKUP_INDEX_INDEX_MASK ((okl4_mmu_lookup_index_t)65535U << 16) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_INDEX_MMU_LOOKUP_INDEX) */
+#define OKL4_MASK_INDEX_MMU_LOOKUP_INDEX ((okl4_mmu_lookup_index_t)65535U << 16)
+/*lint -esym(621, OKL4_SHIFT_INDEX_MMU_LOOKUP_INDEX) */
+#define OKL4_SHIFT_INDEX_MMU_LOOKUP_INDEX (16)
+/*lint -esym(621, OKL4_WIDTH_INDEX_MMU_LOOKUP_INDEX) */
+#define OKL4_WIDTH_INDEX_MMU_LOOKUP_INDEX (16)
+
+
+/*lint -sem(okl4_mmu_lookup_index_geterror, 1p, @n >= 0 && @n <= 65535) */
+/*lint -esym(621, okl4_mmu_lookup_index_geterror) */
+/*lint -esym(714, okl4_mmu_lookup_index_geterror) */
+OKL4_FORCE_INLINE okl4_error_t
+okl4_mmu_lookup_index_geterror(const okl4_mmu_lookup_index_t *x)
+{
+    okl4_error_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 16;
+        } bits;
+        okl4_mmu_lookup_index_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_error_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_mmu_lookup_index_seterror, 2n >= 0 && 2n <= 65535) */
+/*lint -esym(714, okl4_mmu_lookup_index_seterror) */
+
+/*lint -esym(621, okl4_mmu_lookup_index_seterror) */
+OKL4_FORCE_INLINE void
+okl4_mmu_lookup_index_seterror(okl4_mmu_lookup_index_t *x, okl4_error_t _error)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 16;
+        } bits;
+        okl4_mmu_lookup_index_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_error;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_mmu_lookup_index_getindex, 1p, @n >= 0 && @n <= 65535) */
+/*lint -esym(621, okl4_mmu_lookup_index_getindex) */
+/*lint -esym(714, okl4_mmu_lookup_index_getindex) */
+OKL4_FORCE_INLINE okl4_count_t
+okl4_mmu_lookup_index_getindex(const okl4_mmu_lookup_index_t *x)
+{
+    okl4_count_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 16;
+            uint32_t field : 16;
+        } bits;
+        okl4_mmu_lookup_index_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_count_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_mmu_lookup_index_setindex, 2n >= 0 && 2n <= 65535) */
+/*lint -esym(714, okl4_mmu_lookup_index_setindex) */
+
+/*lint -esym(621, okl4_mmu_lookup_index_setindex) */
+OKL4_FORCE_INLINE void
+okl4_mmu_lookup_index_setindex(okl4_mmu_lookup_index_t *x, okl4_count_t _index)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 16;
+            uint32_t field : 16;
+        } bits;
+        okl4_mmu_lookup_index_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_index;
+    *x = _conv.raw;
+}
+/*lint -esym(714, okl4_mmu_lookup_index_init) */
+OKL4_FORCE_INLINE void
+okl4_mmu_lookup_index_init(okl4_mmu_lookup_index_t *x)
+{
+    *x = (okl4_mmu_lookup_index_t)0U;
+}
+
+/*lint -esym(714, okl4_mmu_lookup_index_cast) */
+OKL4_FORCE_INLINE okl4_mmu_lookup_index_t
+okl4_mmu_lookup_index_cast(uint32_t p, okl4_bool_t force)
+{
+    okl4_mmu_lookup_index_t x = (okl4_mmu_lookup_index_t)p;
+    (void)force;
+    return x;
+}
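+
+/*
+ * Usage sketch (illustrative only, not part of the generated API): a value of
+ * this type packs an error code and a segment index into one word, so callers
+ * should check the error field before trusting the index, as below.
+ */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_example_lookup_index_ok(const okl4_mmu_lookup_index_t *result,
+        okl4_count_t *index_out)
+{
+    if (okl4_mmu_lookup_index_geterror(result) != OKL4_ERROR_OK) {
+        return (okl4_bool_t)0;
+    }
+    *index_out = okl4_mmu_lookup_index_getindex(result);
+    return (okl4_bool_t)1;
+}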
+
+
+
+/**
+    - BITS 9..0 -   @ref OKL4_MASK_SEG_INDEX_MMU_LOOKUP_SIZE
+    - BITS 63..10 -   @ref OKL4_MASK_SIZE_10_MMU_LOOKUP_SIZE
+*/
+
+/*lint -esym(621, okl4_mmu_lookup_size_t) */
+typedef okl4_register_t okl4_mmu_lookup_size_t;
+
+/*lint -esym(621, okl4_mmu_lookup_size_getsegindex) */
+/*lint -esym(714, okl4_mmu_lookup_size_getsegindex) */
+OKL4_FORCE_INLINE okl4_count_t
+okl4_mmu_lookup_size_getsegindex(const okl4_mmu_lookup_size_t *x);
+
+/*lint -esym(621, okl4_mmu_lookup_size_setsegindex) */
+OKL4_FORCE_INLINE void
+okl4_mmu_lookup_size_setsegindex(okl4_mmu_lookup_size_t *x, okl4_count_t _seg_index);
+
+/*lint -esym(621, okl4_mmu_lookup_size_getsize10) */
+/*lint -esym(714, okl4_mmu_lookup_size_getsize10) */
+OKL4_FORCE_INLINE okl4_register_t
+okl4_mmu_lookup_size_getsize10(const okl4_mmu_lookup_size_t *x);
+
+/*lint -esym(621, okl4_mmu_lookup_size_setsize10) */
+OKL4_FORCE_INLINE void
+okl4_mmu_lookup_size_setsize10(okl4_mmu_lookup_size_t *x, okl4_register_t _size_10);
+
+/*lint -esym(714, okl4_mmu_lookup_size_init) */
+OKL4_FORCE_INLINE void
+okl4_mmu_lookup_size_init(okl4_mmu_lookup_size_t *x);
+
+/*lint -esym(714, okl4_mmu_lookup_size_cast) */
+OKL4_FORCE_INLINE okl4_mmu_lookup_size_t
+okl4_mmu_lookup_size_cast(uint64_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_MMU_LOOKUP_SIZE_SEG_INDEX_MASK) */
+#define OKL4_MMU_LOOKUP_SIZE_SEG_INDEX_MASK ((okl4_mmu_lookup_size_t)1023U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_SEG_INDEX_MMU_LOOKUP_SIZE) */
+#define OKL4_MASK_SEG_INDEX_MMU_LOOKUP_SIZE ((okl4_mmu_lookup_size_t)1023U)
+/*lint -esym(621, OKL4_SHIFT_SEG_INDEX_MMU_LOOKUP_SIZE) */
+#define OKL4_SHIFT_SEG_INDEX_MMU_LOOKUP_SIZE (0)
+/*lint -esym(621, OKL4_WIDTH_SEG_INDEX_MMU_LOOKUP_SIZE) */
+#define OKL4_WIDTH_SEG_INDEX_MMU_LOOKUP_SIZE (10)
+/*lint -esym(621, OKL4_MMU_LOOKUP_SIZE_SIZE_10_MASK) */
+#define OKL4_MMU_LOOKUP_SIZE_SIZE_10_MASK ((okl4_mmu_lookup_size_t)18014398509481983U << 10) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_SIZE_10_MMU_LOOKUP_SIZE) */
+#define OKL4_MASK_SIZE_10_MMU_LOOKUP_SIZE ((okl4_mmu_lookup_size_t)18014398509481983U << 10)
+/*lint -esym(621, OKL4_SHIFT_SIZE_10_MMU_LOOKUP_SIZE) */
+#define OKL4_SHIFT_SIZE_10_MMU_LOOKUP_SIZE (10)
+/*lint -esym(621, OKL4_WIDTH_SIZE_10_MMU_LOOKUP_SIZE) */
+#define OKL4_WIDTH_SIZE_10_MMU_LOOKUP_SIZE (54)
+
+
+/*lint -sem(okl4_mmu_lookup_size_getsegindex, 1p, @n >= 0 && @n <= 1023) */
+/*lint -esym(621, okl4_mmu_lookup_size_getsegindex) */
+/*lint -esym(714, okl4_mmu_lookup_size_getsegindex) */
+OKL4_FORCE_INLINE okl4_count_t
+okl4_mmu_lookup_size_getsegindex(const okl4_mmu_lookup_size_t *x)
+{
+    okl4_count_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t field : 10;
+        } bits;
+        okl4_mmu_lookup_size_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_count_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_mmu_lookup_size_setsegindex, 2n >= 0 && 2n <= 1023) */
+/*lint -esym(714, okl4_mmu_lookup_size_setsegindex) */
+
+/*lint -esym(621, okl4_mmu_lookup_size_setsegindex) */
+OKL4_FORCE_INLINE void
+okl4_mmu_lookup_size_setsegindex(okl4_mmu_lookup_size_t *x, okl4_count_t _seg_index)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t field : 10;
+        } bits;
+        okl4_mmu_lookup_size_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint64_t)_seg_index;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_mmu_lookup_size_getsize10, 1p, @n >= 0 && @n <= 18014398509481983) */
+/*lint -esym(621, okl4_mmu_lookup_size_getsize10) */
+/*lint -esym(714, okl4_mmu_lookup_size_getsize10) */
+OKL4_FORCE_INLINE okl4_register_t
+okl4_mmu_lookup_size_getsize10(const okl4_mmu_lookup_size_t *x)
+{
+    okl4_register_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 10;
+            uint64_t field : 54;
+        } bits;
+        okl4_mmu_lookup_size_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_register_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_mmu_lookup_size_setsize10, 2n >= 0 && 2n <= 18014398509481983) */
+/*lint -esym(714, okl4_mmu_lookup_size_setsize10) */
+
+/*lint -esym(621, okl4_mmu_lookup_size_setsize10) */
+OKL4_FORCE_INLINE void
+okl4_mmu_lookup_size_setsize10(okl4_mmu_lookup_size_t *x, okl4_register_t _size_10)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 10;
+            uint64_t field : 54;
+        } bits;
+        okl4_mmu_lookup_size_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint64_t)_size_10;
+    *x = _conv.raw;
+}
+/*lint -esym(714, okl4_mmu_lookup_size_init) */
+OKL4_FORCE_INLINE void
+okl4_mmu_lookup_size_init(okl4_mmu_lookup_size_t *x)
+{
+    *x = (okl4_mmu_lookup_size_t)0U;
+}
+
+/*lint -esym(714, okl4_mmu_lookup_size_cast) */
+OKL4_FORCE_INLINE okl4_mmu_lookup_size_t
+okl4_mmu_lookup_size_cast(uint64_t p, okl4_bool_t force)
+{
+    okl4_mmu_lookup_size_t x = (okl4_mmu_lookup_size_t)p;
+    (void)force;
+    return x;
+}
+
+
+
+
+typedef uint64_t okl4_nanoseconds_t;
+
+/** Timer period upper bound is (1 << 55) ns */
+/*lint -esym(621, OKL4_TIMER_MAX_PERIOD_NS) */
+#define OKL4_TIMER_MAX_PERIOD_NS ((okl4_nanoseconds_t)(36028797018963968U))
+
+/** Timer period lower bound is 1000000 ns */
+/*lint -esym(621, OKL4_TIMER_MIN_PERIOD_NS) */
+#define OKL4_TIMER_MIN_PERIOD_NS ((okl4_nanoseconds_t)(1000000U))
+
+
+
+/**
+    - BITS 2..0 -   @ref _OKL4_MASK_RWX_PAGE_ATTRIBUTE
+    - BITS 31..4 -   @ref _OKL4_MASK_ATTRIB_PAGE_ATTRIBUTE
+*/
+
+/*lint -esym(621, _okl4_page_attribute_t) */
+typedef uint32_t _okl4_page_attribute_t;
+
+/*lint -esym(621, _okl4_page_attribute_getrwx) */
+/*lint -esym(714, _okl4_page_attribute_getrwx) */
+OKL4_FORCE_INLINE okl4_page_perms_t
+_okl4_page_attribute_getrwx(const _okl4_page_attribute_t *x);
+
+/*lint -esym(621, _okl4_page_attribute_setrwx) */
+OKL4_FORCE_INLINE void
+_okl4_page_attribute_setrwx(_okl4_page_attribute_t *x, okl4_page_perms_t _rwx);
+
+/*lint -esym(621, _okl4_page_attribute_getattrib) */
+/*lint -esym(714, _okl4_page_attribute_getattrib) */
+OKL4_FORCE_INLINE okl4_page_cache_t
+_okl4_page_attribute_getattrib(const _okl4_page_attribute_t *x);
+
+/*lint -esym(621, _okl4_page_attribute_setattrib) */
+OKL4_FORCE_INLINE void
+_okl4_page_attribute_setattrib(_okl4_page_attribute_t *x, okl4_page_cache_t _attrib);
+
+/*lint -esym(714, _okl4_page_attribute_init) */
+OKL4_FORCE_INLINE void
+_okl4_page_attribute_init(_okl4_page_attribute_t *x);
+
+/*lint -esym(714, _okl4_page_attribute_cast) */
+OKL4_FORCE_INLINE _okl4_page_attribute_t
+_okl4_page_attribute_cast(uint32_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, _OKL4_PAGE_ATTRIBUTE_RWX_MASK) */
+#define _OKL4_PAGE_ATTRIBUTE_RWX_MASK ((_okl4_page_attribute_t)7U) /* Deprecated */
+/*lint -esym(621, _OKL4_MASK_RWX_PAGE_ATTRIBUTE) */
+#define _OKL4_MASK_RWX_PAGE_ATTRIBUTE ((_okl4_page_attribute_t)7U)
+/*lint -esym(621, _OKL4_SHIFT_RWX_PAGE_ATTRIBUTE) */
+#define _OKL4_SHIFT_RWX_PAGE_ATTRIBUTE (0)
+/*lint -esym(621, _OKL4_WIDTH_RWX_PAGE_ATTRIBUTE) */
+#define _OKL4_WIDTH_RWX_PAGE_ATTRIBUTE (3)
+/*lint -esym(621, _OKL4_PAGE_ATTRIBUTE_ATTRIB_MASK) */
+#define _OKL4_PAGE_ATTRIBUTE_ATTRIB_MASK ((_okl4_page_attribute_t)268435455U << 4) /* Deprecated */
+/*lint -esym(621, _OKL4_MASK_ATTRIB_PAGE_ATTRIBUTE) */
+#define _OKL4_MASK_ATTRIB_PAGE_ATTRIBUTE ((_okl4_page_attribute_t)268435455U << 4)
+/*lint -esym(621, _OKL4_SHIFT_ATTRIB_PAGE_ATTRIBUTE) */
+#define _OKL4_SHIFT_ATTRIB_PAGE_ATTRIBUTE (4)
+/*lint -esym(621, _OKL4_WIDTH_ATTRIB_PAGE_ATTRIBUTE) */
+#define _OKL4_WIDTH_ATTRIB_PAGE_ATTRIBUTE (28)
+
+
+/*lint -sem(_okl4_page_attribute_getrwx, 1p, @n >= 0 && @n <= 7) */
+/*lint -esym(621, _okl4_page_attribute_getrwx) */
+/*lint -esym(714, _okl4_page_attribute_getrwx) */
+OKL4_FORCE_INLINE okl4_page_perms_t
+_okl4_page_attribute_getrwx(const _okl4_page_attribute_t *x)
+{
+    okl4_page_perms_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 3;
+        } bits;
+        _okl4_page_attribute_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_page_perms_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(_okl4_page_attribute_setrwx, 2n >= 0 && 2n <= 7) */
+/*lint -esym(714, _okl4_page_attribute_setrwx) */
+
+/*lint -esym(621, _okl4_page_attribute_setrwx) */
+OKL4_FORCE_INLINE void
+_okl4_page_attribute_setrwx(_okl4_page_attribute_t *x, okl4_page_perms_t _rwx)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 3;
+        } bits;
+        _okl4_page_attribute_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_rwx;
+    *x = _conv.raw;
+}
+/*lint -sem(_okl4_page_attribute_getattrib, 1p, @n >= 0 && @n <= 268435455) */
+/*lint -esym(621, _okl4_page_attribute_getattrib) */
+/*lint -esym(714, _okl4_page_attribute_getattrib) */
+OKL4_FORCE_INLINE okl4_page_cache_t
+_okl4_page_attribute_getattrib(const _okl4_page_attribute_t *x)
+{
+    okl4_page_cache_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 4;
+            uint32_t field : 28;
+        } bits;
+        _okl4_page_attribute_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_page_cache_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(_okl4_page_attribute_setattrib, 2n >= 0 && 2n <= 268435455) */
+/*lint -esym(714, _okl4_page_attribute_setattrib) */
+
+/*lint -esym(621, _okl4_page_attribute_setattrib) */
+OKL4_FORCE_INLINE void
+_okl4_page_attribute_setattrib(_okl4_page_attribute_t *x, okl4_page_cache_t _attrib)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 4;
+            uint32_t field : 28;
+        } bits;
+        _okl4_page_attribute_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_attrib;
+    *x = _conv.raw;
+}
+/*lint -esym(714, _okl4_page_attribute_init) */
+OKL4_FORCE_INLINE void
+_okl4_page_attribute_init(_okl4_page_attribute_t *x)
+{
+    *x = (_okl4_page_attribute_t)0U;
+}
+
+/*lint -esym(714, _okl4_page_attribute_cast) */
+OKL4_FORCE_INLINE _okl4_page_attribute_t
+_okl4_page_attribute_cast(uint32_t p, okl4_bool_t force)
+{
+    _okl4_page_attribute_t x = (_okl4_page_attribute_t)p;
+    (void)force;
+    return x;
+}
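+
+/*
+ * Usage sketch (illustrative only, not part of the generated API): pack page
+ * permissions and a cache attribute into a _okl4_page_attribute_t.  The cache
+ * attribute is taken as a parameter because the okl4_page_cache_t encodings
+ * are defined elsewhere in this header.
+ */
+OKL4_FORCE_INLINE _okl4_page_attribute_t
+_okl4_example_make_page_attribute(okl4_page_perms_t rwx,
+        okl4_page_cache_t cache)
+{
+    _okl4_page_attribute_t attr;
+
+    _okl4_page_attribute_init(&attr);
+    _okl4_page_attribute_setrwx(&attr, rwx);
+    _okl4_page_attribute_setattrib(&attr, cache);
+    return attr;
+}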
+
+
+
+/**
+    - BIT 0 -   @ref OKL4_MASK_DO_OP_PIPE_CONTROL
+    - BITS 3..1 -   @ref OKL4_MASK_OPERATION_PIPE_CONTROL
+*/
+
+/*lint -esym(621, okl4_pipe_control_t) */
+typedef uint8_t okl4_pipe_control_t;
+
+/*lint -esym(621, okl4_pipe_control_getdoop) */
+/*lint -esym(714, okl4_pipe_control_getdoop) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_control_getdoop(const okl4_pipe_control_t *x);
+
+/*lint -esym(621, okl4_pipe_control_setdoop) */
+OKL4_FORCE_INLINE void
+okl4_pipe_control_setdoop(okl4_pipe_control_t *x, okl4_bool_t _do_op);
+
+/*lint -esym(621, okl4_pipe_control_getoperation) */
+/*lint -esym(714, okl4_pipe_control_getoperation) */
+OKL4_FORCE_INLINE uint8_t
+okl4_pipe_control_getoperation(const okl4_pipe_control_t *x);
+
+/*lint -esym(621, okl4_pipe_control_setoperation) */
+OKL4_FORCE_INLINE void
+okl4_pipe_control_setoperation(okl4_pipe_control_t *x, uint8_t _operation);
+
+/*lint -esym(714, okl4_pipe_control_init) */
+OKL4_FORCE_INLINE void
+okl4_pipe_control_init(okl4_pipe_control_t *x);
+
+/*lint -esym(714, okl4_pipe_control_cast) */
+OKL4_FORCE_INLINE okl4_pipe_control_t
+okl4_pipe_control_cast(uint8_t p, okl4_bool_t force);
+
+
+
+/*lint -esym(621, OKL4_PIPE_CONTROL_OP_CLR_HALTED) */
+#define OKL4_PIPE_CONTROL_OP_CLR_HALTED ((okl4_pipe_control_t)(4U))
+/*lint -esym(621, OKL4_PIPE_CONTROL_OP_RESET) */
+#define OKL4_PIPE_CONTROL_OP_RESET ((okl4_pipe_control_t)(0U))
+/*lint -esym(621, OKL4_PIPE_CONTROL_OP_SET_HALTED) */
+#define OKL4_PIPE_CONTROL_OP_SET_HALTED ((okl4_pipe_control_t)(3U))
+/*lint -esym(621, OKL4_PIPE_CONTROL_OP_SET_RX_READY) */
+#define OKL4_PIPE_CONTROL_OP_SET_RX_READY ((okl4_pipe_control_t)(2U))
+/*lint -esym(621, OKL4_PIPE_CONTROL_OP_SET_TX_READY) */
+#define OKL4_PIPE_CONTROL_OP_SET_TX_READY ((okl4_pipe_control_t)(1U))
+
+/*lint -esym(621, OKL4_PIPE_CONTROL_DO_OP_MASK) */
+#define OKL4_PIPE_CONTROL_DO_OP_MASK (okl4_pipe_control_t)(1U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_DO_OP_PIPE_CONTROL) */
+#define OKL4_MASK_DO_OP_PIPE_CONTROL (okl4_pipe_control_t)(1U)
+/*lint -esym(621, OKL4_SHIFT_DO_OP_PIPE_CONTROL) */
+#define OKL4_SHIFT_DO_OP_PIPE_CONTROL (0)
+/*lint -esym(621, OKL4_WIDTH_DO_OP_PIPE_CONTROL) */
+#define OKL4_WIDTH_DO_OP_PIPE_CONTROL (1)
+/*lint -esym(621, OKL4_PIPE_CONTROL_OPERATION_MASK) */
+#define OKL4_PIPE_CONTROL_OPERATION_MASK (okl4_pipe_control_t)(7U << 1) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_OPERATION_PIPE_CONTROL) */
+#define OKL4_MASK_OPERATION_PIPE_CONTROL (okl4_pipe_control_t)(7U << 1)
+/*lint -esym(621, OKL4_SHIFT_OPERATION_PIPE_CONTROL) */
+#define OKL4_SHIFT_OPERATION_PIPE_CONTROL (1)
+/*lint -esym(621, OKL4_WIDTH_OPERATION_PIPE_CONTROL) */
+#define OKL4_WIDTH_OPERATION_PIPE_CONTROL (3)
+
+
+/*lint -sem(okl4_pipe_control_getdoop, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_pipe_control_getdoop) */
+/*lint -esym(714, okl4_pipe_control_getdoop) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_control_getdoop(const okl4_pipe_control_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_control_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_pipe_control_setdoop, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_pipe_control_setdoop) */
+
+/*lint -esym(621, okl4_pipe_control_setdoop) */
+OKL4_FORCE_INLINE void
+okl4_pipe_control_setdoop(okl4_pipe_control_t *x, okl4_bool_t _do_op)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_control_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_do_op;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_pipe_control_getoperation, 1p, @n >= 0 && @n <= 7) */
+/*lint -esym(621, okl4_pipe_control_getoperation) */
+/*lint -esym(714, okl4_pipe_control_getoperation) */
+OKL4_FORCE_INLINE uint8_t
+okl4_pipe_control_getoperation(const okl4_pipe_control_t *x)
+{
+    uint8_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 1;
+            uint32_t field : 3;
+        } bits;
+        okl4_pipe_control_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint8_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_pipe_control_setoperation, 2n >= 0 && 2n <= 7) */
+/*lint -esym(714, okl4_pipe_control_setoperation) */
+
+/*lint -esym(621, okl4_pipe_control_setoperation) */
+OKL4_FORCE_INLINE void
+okl4_pipe_control_setoperation(okl4_pipe_control_t *x, uint8_t _operation)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 1;
+            uint32_t field : 3;
+        } bits;
+        okl4_pipe_control_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_operation;
+    *x = _conv.raw;
+}
+/*lint -esym(714, okl4_pipe_control_init) */
+OKL4_FORCE_INLINE void
+okl4_pipe_control_init(okl4_pipe_control_t *x)
+{
+    *x = (okl4_pipe_control_t)0U;
+}
+
+/*lint -esym(714, okl4_pipe_control_cast) */
+OKL4_FORCE_INLINE okl4_pipe_control_t
+okl4_pipe_control_cast(uint8_t p, okl4_bool_t force)
+{
+    okl4_pipe_control_t x = (okl4_pipe_control_t)p;
+    (void)force;
+    return x;
+}
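+
+/*
+ * Illustrative sketch only (not part of the generated API, compiled out):
+ * one way the okl4_pipe_control_t accessors above might be combined to
+ * encode a "set TX ready" request.  The helper name is hypothetical.
+ */
+#if 0
+static okl4_pipe_control_t
+example_encode_set_tx_ready(void)
+{
+    okl4_pipe_control_t ctrl;
+
+    okl4_pipe_control_init(&ctrl);                      /* all fields cleared */
+    okl4_pipe_control_setdoop(&ctrl, (okl4_bool_t)1);   /* request an operation */
+    okl4_pipe_control_setoperation(&ctrl,
+            (uint8_t)OKL4_PIPE_CONTROL_OP_SET_TX_READY); /* operation = 1 in bits 3..1 */
+    return ctrl;
+}
+#endif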
+
+
+
+/**
+    - BIT 0 -   @ref OKL4_MASK_RESET_PIPE_STATE
+    - BIT 1 -   @ref OKL4_MASK_HALTED_PIPE_STATE
+    - BIT 2 -   @ref OKL4_MASK_RX_READY_PIPE_STATE
+    - BIT 3 -   @ref OKL4_MASK_TX_READY_PIPE_STATE
+    - BIT 4 -   @ref OKL4_MASK_RX_AVAILABLE_PIPE_STATE
+    - BIT 5 -   @ref OKL4_MASK_TX_AVAILABLE_PIPE_STATE
+    - BIT 6 -   @ref OKL4_MASK_WAITING_PIPE_STATE
+    - BIT 7 -   @ref OKL4_MASK_OVERQUOTA_PIPE_STATE
+*/
+
+/*lint -esym(621, okl4_pipe_state_t) */
+typedef uint8_t okl4_pipe_state_t;
+
+/*lint -esym(621, okl4_pipe_state_getreset) */
+/*lint -esym(714, okl4_pipe_state_getreset) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_getreset(const okl4_pipe_state_t *x);
+
+/*lint -esym(621, okl4_pipe_state_setreset) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_setreset(okl4_pipe_state_t *x, okl4_bool_t _reset);
+
+/*lint -esym(621, okl4_pipe_state_gethalted) */
+/*lint -esym(714, okl4_pipe_state_gethalted) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_gethalted(const okl4_pipe_state_t *x);
+
+/*lint -esym(621, okl4_pipe_state_sethalted) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_sethalted(okl4_pipe_state_t *x, okl4_bool_t _halted);
+
+/*lint -esym(621, okl4_pipe_state_getrxready) */
+/*lint -esym(714, okl4_pipe_state_getrxready) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_getrxready(const okl4_pipe_state_t *x);
+
+/*lint -esym(621, okl4_pipe_state_setrxready) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_setrxready(okl4_pipe_state_t *x, okl4_bool_t _rx_ready);
+
+/*lint -esym(621, okl4_pipe_state_gettxready) */
+/*lint -esym(714, okl4_pipe_state_gettxready) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_gettxready(const okl4_pipe_state_t *x);
+
+/*lint -esym(621, okl4_pipe_state_settxready) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_settxready(okl4_pipe_state_t *x, okl4_bool_t _tx_ready);
+
+/*lint -esym(621, okl4_pipe_state_getrxavailable) */
+/*lint -esym(714, okl4_pipe_state_getrxavailable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_getrxavailable(const okl4_pipe_state_t *x);
+
+/*lint -esym(621, okl4_pipe_state_setrxavailable) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_setrxavailable(okl4_pipe_state_t *x, okl4_bool_t _rx_available);
+
+/*lint -esym(621, okl4_pipe_state_gettxavailable) */
+/*lint -esym(714, okl4_pipe_state_gettxavailable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_gettxavailable(const okl4_pipe_state_t *x);
+
+/*lint -esym(621, okl4_pipe_state_settxavailable) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_settxavailable(okl4_pipe_state_t *x, okl4_bool_t _tx_available);
+
+/*lint -esym(621, okl4_pipe_state_getwaiting) */
+/*lint -esym(714, okl4_pipe_state_getwaiting) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_getwaiting(const okl4_pipe_state_t *x);
+
+/*lint -esym(621, okl4_pipe_state_setwaiting) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_setwaiting(okl4_pipe_state_t *x, okl4_bool_t _waiting);
+
+/*lint -esym(621, okl4_pipe_state_getoverquota) */
+/*lint -esym(714, okl4_pipe_state_getoverquota) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_getoverquota(const okl4_pipe_state_t *x);
+
+/*lint -esym(621, okl4_pipe_state_setoverquota) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_setoverquota(okl4_pipe_state_t *x, okl4_bool_t _overquota);
+
+/*lint -esym(714, okl4_pipe_state_init) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_init(okl4_pipe_state_t *x);
+
+/*lint -esym(714, okl4_pipe_state_cast) */
+OKL4_FORCE_INLINE okl4_pipe_state_t
+okl4_pipe_state_cast(uint8_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_PIPE_STATE_RESET_MASK) */
+#define OKL4_PIPE_STATE_RESET_MASK (okl4_pipe_state_t)(1U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_RESET_PIPE_STATE) */
+#define OKL4_MASK_RESET_PIPE_STATE (okl4_pipe_state_t)(1U)
+/*lint -esym(621, OKL4_SHIFT_RESET_PIPE_STATE) */
+#define OKL4_SHIFT_RESET_PIPE_STATE (0)
+/*lint -esym(621, OKL4_WIDTH_RESET_PIPE_STATE) */
+#define OKL4_WIDTH_RESET_PIPE_STATE (1)
+/*lint -esym(621, OKL4_PIPE_STATE_HALTED_MASK) */
+#define OKL4_PIPE_STATE_HALTED_MASK (okl4_pipe_state_t)(1U << 1) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_HALTED_PIPE_STATE) */
+#define OKL4_MASK_HALTED_PIPE_STATE (okl4_pipe_state_t)(1U << 1)
+/*lint -esym(621, OKL4_SHIFT_HALTED_PIPE_STATE) */
+#define OKL4_SHIFT_HALTED_PIPE_STATE (1)
+/*lint -esym(621, OKL4_WIDTH_HALTED_PIPE_STATE) */
+#define OKL4_WIDTH_HALTED_PIPE_STATE (1)
+/*lint -esym(621, OKL4_PIPE_STATE_RX_READY_MASK) */
+#define OKL4_PIPE_STATE_RX_READY_MASK (okl4_pipe_state_t)(1U << 2) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_RX_READY_PIPE_STATE) */
+#define OKL4_MASK_RX_READY_PIPE_STATE (okl4_pipe_state_t)(1U << 2)
+/*lint -esym(621, OKL4_SHIFT_RX_READY_PIPE_STATE) */
+#define OKL4_SHIFT_RX_READY_PIPE_STATE (2)
+/*lint -esym(621, OKL4_WIDTH_RX_READY_PIPE_STATE) */
+#define OKL4_WIDTH_RX_READY_PIPE_STATE (1)
+/*lint -esym(621, OKL4_PIPE_STATE_TX_READY_MASK) */
+#define OKL4_PIPE_STATE_TX_READY_MASK (okl4_pipe_state_t)(1U << 3) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_TX_READY_PIPE_STATE) */
+#define OKL4_MASK_TX_READY_PIPE_STATE (okl4_pipe_state_t)(1U << 3)
+/*lint -esym(621, OKL4_SHIFT_TX_READY_PIPE_STATE) */
+#define OKL4_SHIFT_TX_READY_PIPE_STATE (3)
+/*lint -esym(621, OKL4_WIDTH_TX_READY_PIPE_STATE) */
+#define OKL4_WIDTH_TX_READY_PIPE_STATE (1)
+/*lint -esym(621, OKL4_PIPE_STATE_RX_AVAILABLE_MASK) */
+#define OKL4_PIPE_STATE_RX_AVAILABLE_MASK (okl4_pipe_state_t)(1U << 4) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_RX_AVAILABLE_PIPE_STATE) */
+#define OKL4_MASK_RX_AVAILABLE_PIPE_STATE (okl4_pipe_state_t)(1U << 4)
+/*lint -esym(621, OKL4_SHIFT_RX_AVAILABLE_PIPE_STATE) */
+#define OKL4_SHIFT_RX_AVAILABLE_PIPE_STATE (4)
+/*lint -esym(621, OKL4_WIDTH_RX_AVAILABLE_PIPE_STATE) */
+#define OKL4_WIDTH_RX_AVAILABLE_PIPE_STATE (1)
+/*lint -esym(621, OKL4_PIPE_STATE_TX_AVAILABLE_MASK) */
+#define OKL4_PIPE_STATE_TX_AVAILABLE_MASK (okl4_pipe_state_t)(1U << 5) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_TX_AVAILABLE_PIPE_STATE) */
+#define OKL4_MASK_TX_AVAILABLE_PIPE_STATE (okl4_pipe_state_t)(1U << 5)
+/*lint -esym(621, OKL4_SHIFT_TX_AVAILABLE_PIPE_STATE) */
+#define OKL4_SHIFT_TX_AVAILABLE_PIPE_STATE (5)
+/*lint -esym(621, OKL4_WIDTH_TX_AVAILABLE_PIPE_STATE) */
+#define OKL4_WIDTH_TX_AVAILABLE_PIPE_STATE (1)
+/*lint -esym(621, OKL4_PIPE_STATE_WAITING_MASK) */
+#define OKL4_PIPE_STATE_WAITING_MASK (okl4_pipe_state_t)(1U << 6) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_WAITING_PIPE_STATE) */
+#define OKL4_MASK_WAITING_PIPE_STATE (okl4_pipe_state_t)(1U << 6)
+/*lint -esym(621, OKL4_SHIFT_WAITING_PIPE_STATE) */
+#define OKL4_SHIFT_WAITING_PIPE_STATE (6)
+/*lint -esym(621, OKL4_WIDTH_WAITING_PIPE_STATE) */
+#define OKL4_WIDTH_WAITING_PIPE_STATE (1)
+/*lint -esym(621, OKL4_PIPE_STATE_OVERQUOTA_MASK) */
+#define OKL4_PIPE_STATE_OVERQUOTA_MASK (okl4_pipe_state_t)(1U << 7) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_OVERQUOTA_PIPE_STATE) */
+#define OKL4_MASK_OVERQUOTA_PIPE_STATE (okl4_pipe_state_t)(1U << 7)
+/*lint -esym(621, OKL4_SHIFT_OVERQUOTA_PIPE_STATE) */
+#define OKL4_SHIFT_OVERQUOTA_PIPE_STATE (7)
+/*lint -esym(621, OKL4_WIDTH_OVERQUOTA_PIPE_STATE) */
+#define OKL4_WIDTH_OVERQUOTA_PIPE_STATE (1)
+
+
+/*lint -sem(okl4_pipe_state_getreset, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_pipe_state_getreset) */
+/*lint -esym(714, okl4_pipe_state_getreset) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_getreset(const okl4_pipe_state_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_pipe_state_setreset, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_pipe_state_setreset) */
+
+/*lint -esym(621, okl4_pipe_state_setreset) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_setreset(okl4_pipe_state_t *x, okl4_bool_t _reset)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_reset;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_pipe_state_gethalted, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_pipe_state_gethalted) */
+/*lint -esym(714, okl4_pipe_state_gethalted) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_gethalted(const okl4_pipe_state_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 1;
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_pipe_state_sethalted, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_pipe_state_sethalted) */
+
+/*lint -esym(621, okl4_pipe_state_sethalted) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_sethalted(okl4_pipe_state_t *x, okl4_bool_t _halted)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 1;
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_halted;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_pipe_state_getrxready, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_pipe_state_getrxready) */
+/*lint -esym(714, okl4_pipe_state_getrxready) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_getrxready(const okl4_pipe_state_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 2;
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_pipe_state_setrxready, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_pipe_state_setrxready) */
+
+/*lint -esym(621, okl4_pipe_state_setrxready) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_setrxready(okl4_pipe_state_t *x, okl4_bool_t _rx_ready)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 2;
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_rx_ready;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_pipe_state_gettxready, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_pipe_state_gettxready) */
+/*lint -esym(714, okl4_pipe_state_gettxready) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_gettxready(const okl4_pipe_state_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 3;
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_pipe_state_settxready, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_pipe_state_settxready) */
+
+/*lint -esym(621, okl4_pipe_state_settxready) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_settxready(okl4_pipe_state_t *x, okl4_bool_t _tx_ready)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 3;
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_tx_ready;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_pipe_state_getrxavailable, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_pipe_state_getrxavailable) */
+/*lint -esym(714, okl4_pipe_state_getrxavailable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_getrxavailable(const okl4_pipe_state_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 4;
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_pipe_state_setrxavailable, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_pipe_state_setrxavailable) */
+
+/*lint -esym(621, okl4_pipe_state_setrxavailable) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_setrxavailable(okl4_pipe_state_t *x, okl4_bool_t _rx_available)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 4;
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_rx_available;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_pipe_state_gettxavailable, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_pipe_state_gettxavailable) */
+/*lint -esym(714, okl4_pipe_state_gettxavailable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_gettxavailable(const okl4_pipe_state_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 5;
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_pipe_state_settxavailable, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_pipe_state_settxavailable) */
+
+/*lint -esym(621, okl4_pipe_state_settxavailable) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_settxavailable(okl4_pipe_state_t *x, okl4_bool_t _tx_available)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 5;
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_tx_available;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_pipe_state_getwaiting, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_pipe_state_getwaiting) */
+/*lint -esym(714, okl4_pipe_state_getwaiting) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_getwaiting(const okl4_pipe_state_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 6;
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_pipe_state_setwaiting, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_pipe_state_setwaiting) */
+
+/*lint -esym(621, okl4_pipe_state_setwaiting) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_setwaiting(okl4_pipe_state_t *x, okl4_bool_t _waiting)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 6;
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_waiting;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_pipe_state_getoverquota, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_pipe_state_getoverquota) */
+/*lint -esym(714, okl4_pipe_state_getoverquota) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_getoverquota(const okl4_pipe_state_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 7;
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_pipe_state_setoverquota, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_pipe_state_setoverquota) */
+
+/*lint -esym(621, okl4_pipe_state_setoverquota) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_setoverquota(okl4_pipe_state_t *x, okl4_bool_t _overquota)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 7;
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_overquota;
+    *x = _conv.raw;
+}
+/*lint -esym(714, okl4_pipe_state_init) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_init(okl4_pipe_state_t *x)
+{
+    *x = (okl4_pipe_state_t)1U; /* bit 0 (RESET) set: a newly initialised pipe starts in the reset state */
+}
+
+/*lint -esym(714, okl4_pipe_state_cast) */
+OKL4_FORCE_INLINE okl4_pipe_state_t
+okl4_pipe_state_cast(uint8_t p, okl4_bool_t force)
+{
+    okl4_pipe_state_t x = (okl4_pipe_state_t)p;
+    (void)force;
+    return x;
+}
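+
+/*
+ * Illustrative sketch only (compiled out): decoding a pipe state word with
+ * the accessors above.  The helper name, and the assumption that sending is
+ * sensible only when the TX side is ready and has space available, are not
+ * part of the generated API.
+ */
+#if 0
+static okl4_bool_t
+example_pipe_can_send(const okl4_pipe_state_t *state)
+{
+    /* Out of reset, not halted, TX ready and TX buffer space available. */
+    return (okl4_bool_t)(!okl4_pipe_state_getreset(state) &&
+                         !okl4_pipe_state_gethalted(state) &&
+                         okl4_pipe_state_gettxready(state) &&
+                         okl4_pipe_state_gettxavailable(state));
+}
+#endif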
+
+
+
+
+typedef uint32_t okl4_power_state_t;
+
+/*lint -esym(621, OKL4_POWER_STATE_IDLE) */
+#define OKL4_POWER_STATE_IDLE ((okl4_power_state_t)(0U))
+
+/*lint -esym(621, OKL4_POWER_STATE_PLATFORM_BASE) */
+#define OKL4_POWER_STATE_PLATFORM_BASE ((okl4_power_state_t)(256U))
+
+/*lint -esym(621, OKL4_POWER_STATE_POWEROFF) */
+#define OKL4_POWER_STATE_POWEROFF ((okl4_power_state_t)(1U))
+
+
+
+/**
+    The okl4_priority_t type represents a thread scheduling priority.
+    Valid priorities range from [0, CONFIG\_SCHEDULER\_NUM\_PRIOS).
+*/
+
+typedef int8_t okl4_priority_t;
+
+
+
+
+
+typedef okl4_register_t okl4_psize_pn_t;
+
+
+
+
+
+typedef okl4_register_t okl4_psize_tr_t;
+
+
+
+
+/**
+    The okl4_register_set_t type is an enumeration identifying one of
+    the register sets supported by the host machine. This includes the
+    general-purpose registers, along with other CPU-specific register
+    sets such as floating point or vector registers.
+
+    - @ref OKL4_REGISTER_SET_CPU_REGS
+    - @ref OKL4_REGISTER_SET_VFP_REGS
+    - @ref OKL4_REGISTER_SET_VFP_CTRL_REGS
+    - @ref OKL4_REGISTER_SET_VFP64_REGS
+    - @ref OKL4_REGISTER_SET_VFP128_REGS
+    - @ref OKL4_REGISTER_SET_MAX
+    - @ref OKL4_REGISTER_SET_INVALID
+*/
+
+typedef uint32_t okl4_register_set_t;
+
+/*lint -esym(621, OKL4_REGISTER_SET_CPU_REGS) */
+#define OKL4_REGISTER_SET_CPU_REGS ((okl4_register_set_t)0x0U)
+/*lint -esym(621, OKL4_REGISTER_SET_VFP_REGS) */
+#define OKL4_REGISTER_SET_VFP_REGS ((okl4_register_set_t)0x1U)
+/*lint -esym(621, OKL4_REGISTER_SET_VFP_CTRL_REGS) */
+#define OKL4_REGISTER_SET_VFP_CTRL_REGS ((okl4_register_set_t)0x2U)
+/*lint -esym(621, OKL4_REGISTER_SET_VFP64_REGS) */
+#define OKL4_REGISTER_SET_VFP64_REGS ((okl4_register_set_t)0x3U)
+/*lint -esym(621, OKL4_REGISTER_SET_VFP128_REGS) */
+#define OKL4_REGISTER_SET_VFP128_REGS ((okl4_register_set_t)0x4U)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_REGISTER_SET_MAX) */
+#define OKL4_REGISTER_SET_MAX ((okl4_register_set_t)0x4U)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_REGISTER_SET_INVALID) */
+#define OKL4_REGISTER_SET_INVALID ((okl4_register_set_t)0xffffffffU)
+
+/*lint -esym(714, okl4_register_set_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_register_set_is_element_of(okl4_register_set_t var);
+
+
+/*lint -esym(714, okl4_register_set_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_register_set_is_element_of(okl4_register_set_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((var == OKL4_REGISTER_SET_CPU_REGS) ||
+            (var == OKL4_REGISTER_SET_VFP_REGS) ||
+            (var == OKL4_REGISTER_SET_VFP_CTRL_REGS) ||
+            (var == OKL4_REGISTER_SET_VFP64_REGS) ||
+            (var == OKL4_REGISTER_SET_VFP128_REGS));
+}
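+
+/*
+ * Illustrative sketch only (compiled out): okl4_register_set_is_element_of()
+ * can validate an untrusted register-set identifier before use.  The helper
+ * name and the fallback to OKL4_REGISTER_SET_INVALID are assumptions.
+ */
+#if 0
+static okl4_register_set_t
+example_validate_register_set(uint32_t untrusted)
+{
+    okl4_register_set_t set = (okl4_register_set_t)untrusted;
+
+    if (!okl4_register_set_is_element_of(set)) {
+        set = OKL4_REGISTER_SET_INVALID;  /* reject unknown values */
+    }
+    return set;
+}
+#endif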
+
+
+
+typedef okl4_psize_t okl4_vsize_t;
+
+
+
+
+/**
+    The okl4_register_and_set_t type is a bitfield containing a register
+    set identifier of type okl4_register_set_t, and an index into that
+    register set.
+
+    - BITS 15..0 -   @ref OKL4_MASK_OFFSET_REGISTER_AND_SET
+    - BITS 31..16 -   @ref OKL4_MASK_SET_REGISTER_AND_SET
+*/
+
+/*lint -esym(621, okl4_register_and_set_t) */
+typedef uint32_t okl4_register_and_set_t;
+
+/*lint -esym(621, okl4_register_and_set_getoffset) */
+/*lint -esym(714, okl4_register_and_set_getoffset) */
+OKL4_FORCE_INLINE okl4_vsize_t
+okl4_register_and_set_getoffset(const okl4_register_and_set_t *x);
+
+/*lint -esym(621, okl4_register_and_set_setoffset) */
+OKL4_FORCE_INLINE void
+okl4_register_and_set_setoffset(okl4_register_and_set_t *x, okl4_vsize_t _offset);
+
+/*lint -esym(621, okl4_register_and_set_getset) */
+/*lint -esym(714, okl4_register_and_set_getset) */
+OKL4_FORCE_INLINE okl4_register_set_t
+okl4_register_and_set_getset(const okl4_register_and_set_t *x);
+
+/*lint -esym(621, okl4_register_and_set_setset) */
+OKL4_FORCE_INLINE void
+okl4_register_and_set_setset(okl4_register_and_set_t *x, okl4_register_set_t _set);
+
+/*lint -esym(714, okl4_register_and_set_init) */
+OKL4_FORCE_INLINE void
+okl4_register_and_set_init(okl4_register_and_set_t *x);
+
+/*lint -esym(714, okl4_register_and_set_cast) */
+OKL4_FORCE_INLINE okl4_register_and_set_t
+okl4_register_and_set_cast(uint32_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_REGISTER_AND_SET_OFFSET_MASK) */
+#define OKL4_REGISTER_AND_SET_OFFSET_MASK ((okl4_register_and_set_t)65535U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_OFFSET_REGISTER_AND_SET) */
+#define OKL4_MASK_OFFSET_REGISTER_AND_SET ((okl4_register_and_set_t)65535U)
+/*lint -esym(621, OKL4_SHIFT_OFFSET_REGISTER_AND_SET) */
+#define OKL4_SHIFT_OFFSET_REGISTER_AND_SET (0)
+/*lint -esym(621, OKL4_WIDTH_OFFSET_REGISTER_AND_SET) */
+#define OKL4_WIDTH_OFFSET_REGISTER_AND_SET (16)
+/*lint -esym(621, OKL4_REGISTER_AND_SET_SET_MASK) */
+#define OKL4_REGISTER_AND_SET_SET_MASK ((okl4_register_and_set_t)65535U << 16) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_SET_REGISTER_AND_SET) */
+#define OKL4_MASK_SET_REGISTER_AND_SET ((okl4_register_and_set_t)65535U << 16)
+/*lint -esym(621, OKL4_SHIFT_SET_REGISTER_AND_SET) */
+#define OKL4_SHIFT_SET_REGISTER_AND_SET (16)
+/*lint -esym(621, OKL4_WIDTH_SET_REGISTER_AND_SET) */
+#define OKL4_WIDTH_SET_REGISTER_AND_SET (16)
+
+
+/*lint -sem(okl4_register_and_set_getoffset, 1p, @n >= 0 && @n <= 65535) */
+/*lint -esym(621, okl4_register_and_set_getoffset) */
+/*lint -esym(714, okl4_register_and_set_getoffset) */
+OKL4_FORCE_INLINE okl4_vsize_t
+okl4_register_and_set_getoffset(const okl4_register_and_set_t *x)
+{
+    okl4_vsize_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 16;
+        } bits;
+        okl4_register_and_set_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_vsize_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_register_and_set_setoffset, 2n >= 0 && 2n <= 65535) */
+/*lint -esym(714, okl4_register_and_set_setoffset) */
+
+/*lint -esym(621, okl4_register_and_set_setoffset) */
+OKL4_FORCE_INLINE void
+okl4_register_and_set_setoffset(okl4_register_and_set_t *x, okl4_vsize_t _offset)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 16;
+        } bits;
+        okl4_register_and_set_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_offset;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_register_and_set_getset, 1p, @n >= 0 && @n <= 65535) */
+/*lint -esym(621, okl4_register_and_set_getset) */
+/*lint -esym(714, okl4_register_and_set_getset) */
+OKL4_FORCE_INLINE okl4_register_set_t
+okl4_register_and_set_getset(const okl4_register_and_set_t *x)
+{
+    okl4_register_set_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 16;
+            uint32_t field : 16;
+        } bits;
+        okl4_register_and_set_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_register_set_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_register_and_set_setset, 2n >= 0 && 2n <= 65535) */
+/*lint -esym(714, okl4_register_and_set_setset) */
+
+/*lint -esym(621, okl4_register_and_set_setset) */
+OKL4_FORCE_INLINE void
+okl4_register_and_set_setset(okl4_register_and_set_t *x, okl4_register_set_t _set)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 16;
+            uint32_t field : 16;
+        } bits;
+        okl4_register_and_set_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_set;
+    *x = _conv.raw;
+}
+/*lint -esym(714, okl4_register_and_set_init) */
+OKL4_FORCE_INLINE void
+okl4_register_and_set_init(okl4_register_and_set_t *x)
+{
+    *x = (okl4_register_and_set_t)0U;
+}
+
+/*lint -esym(714, okl4_register_and_set_cast) */
+OKL4_FORCE_INLINE okl4_register_and_set_t
+okl4_register_and_set_cast(uint32_t p, okl4_bool_t force)
+{
+    okl4_register_and_set_t x = (okl4_register_and_set_t)p;
+    (void)force;
+    return x;
+}
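+
+/*
+ * Illustrative sketch only (compiled out): packing a register selector with
+ * the okl4_register_and_set_t accessors above.  Whether the offset is a
+ * register index or a byte offset is not specified here, so that is left to
+ * the caller; the helper name is hypothetical.
+ */
+#if 0
+static okl4_register_and_set_t
+example_select_cpu_register(okl4_vsize_t offset)
+{
+    okl4_register_and_set_t sel;
+
+    okl4_register_and_set_init(&sel);
+    okl4_register_and_set_setset(&sel, OKL4_REGISTER_SET_CPU_REGS);
+    okl4_register_and_set_setoffset(&sel, offset);
+    return sel;
+}
+#endif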
+
+
+
+
+struct okl4_cpu_registers {
+    okl4_register_t x[31];
+    okl4_register_t sp_el0;
+    okl4_register_t ip;
+    uint32_t cpsr;
+    _okl4_padding_t __padding0_4; /**< Padding 8 */
+    _okl4_padding_t __padding1_5; /**< Padding 8 */
+    _okl4_padding_t __padding2_6; /**< Padding 8 */
+    _okl4_padding_t __padding3_7; /**< Padding 8 */
+    okl4_register_t sp_EL1;
+    okl4_register_t elr_EL1;
+    uint32_t spsr_EL1;
+    uint32_t spsr_abt;
+    uint32_t spsr_und;
+    uint32_t spsr_irq;
+    uint32_t spsr_fiq;
+    uint32_t csselr_EL1;
+    okl4_arm_sctlr_t sctlr_EL1;
+    uint32_t cpacr_EL1;
+    uint64_t ttbr0_EL1;
+    uint64_t ttbr1_EL1;
+    uint64_t tcr_EL1;
+    uint32_t dacr32_EL2;
+    uint32_t ifsr32_EL2;
+    uint32_t esr_EL1;
+    _okl4_padding_t __padding4_4; /**< Padding 8 */
+    _okl4_padding_t __padding5_5; /**< Padding 8 */
+    _okl4_padding_t __padding6_6; /**< Padding 8 */
+    _okl4_padding_t __padding7_7; /**< Padding 8 */
+    uint64_t far_EL1;
+    uint64_t par_EL1;
+    uint64_t mair_EL1;
+    uint64_t vbar_EL1;
+    uint32_t contextidr_EL1;
+    _okl4_padding_t __padding8_4; /**< Padding 8 */
+    _okl4_padding_t __padding9_5; /**< Padding 8 */
+    _okl4_padding_t __padding10_6; /**< Padding 8 */
+    _okl4_padding_t __padding11_7; /**< Padding 8 */
+    uint64_t tpidr_EL1;
+    uint64_t tpidrro_EL0;
+    uint64_t tpidr_EL0;
+    uint32_t pmcr_EL0;
+    _okl4_padding_t __padding12_4; /**< Padding 8 */
+    _okl4_padding_t __padding13_5; /**< Padding 8 */
+    _okl4_padding_t __padding14_6; /**< Padding 8 */
+    _okl4_padding_t __padding15_7; /**< Padding 8 */
+    uint64_t pmccntr_EL0;
+    uint32_t fpexc32_EL2;
+    uint32_t cntkctl_EL1;
+};
+
+
+
+
+
+
+/**
+    The okl4_cpu_registers_t type represents the CPU register context on the
+    native machine, comprising the general-purpose registers together with
+    the banked system registers captured in struct okl4_cpu_registers.
+*/
+
+typedef struct okl4_cpu_registers okl4_cpu_registers_t;
+
+
+
+
+/**
+    The `okl4_rights_t` type represents a set of operations that are allowed to
+    be performed using a given cap.
+*/
+
+typedef uint32_t okl4_rights_t;
+
+
+
+
+
+typedef uint64_t okl4_soc_time_t;
+
+
+
+
+/**
+    Per-target scheduling profile data: a timestamp, accumulated CPU time,
+    and counts of context switches, CPU migrations, hardware IRQs and
+    virtual IRQs.
+*/
+
+struct okl4_schedule_profile_data {
+    okl4_soc_time_t timestamp;
+    okl4_soc_time_t cpu_time;
+    okl4_count_t context_switches;
+    okl4_count_t cpu_migrations;
+    okl4_count_t cpu_hwirqs;
+    okl4_count_t cpu_virqs;
+};
+
+
+
+
+/**
+    - BIT 0 -   @ref OKL4_MASK_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS
+*/
+
+/*lint -esym(621, okl4_scheduler_virq_flags_t) */
+typedef okl4_virq_flags_t okl4_scheduler_virq_flags_t;
+
+/*lint -esym(621, okl4_scheduler_virq_flags_getpowersuspended) */
+/*lint -esym(714, okl4_scheduler_virq_flags_getpowersuspended) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_scheduler_virq_flags_getpowersuspended(const okl4_scheduler_virq_flags_t *x);
+
+/*lint -esym(621, okl4_scheduler_virq_flags_setpowersuspended) */
+OKL4_FORCE_INLINE void
+okl4_scheduler_virq_flags_setpowersuspended(okl4_scheduler_virq_flags_t *x, okl4_bool_t _power_suspended);
+
+/*lint -esym(714, okl4_scheduler_virq_flags_init) */
+OKL4_FORCE_INLINE void
+okl4_scheduler_virq_flags_init(okl4_scheduler_virq_flags_t *x);
+
+/*lint -esym(714, okl4_scheduler_virq_flags_cast) */
+OKL4_FORCE_INLINE okl4_scheduler_virq_flags_t
+okl4_scheduler_virq_flags_cast(uint64_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_SCHEDULER_VIRQ_FLAGS_POWER_SUSPENDED_MASK) */
+#define OKL4_SCHEDULER_VIRQ_FLAGS_POWER_SUSPENDED_MASK ((okl4_scheduler_virq_flags_t)1U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS) */
+#define OKL4_MASK_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS ((okl4_scheduler_virq_flags_t)1U)
+/*lint -esym(621, OKL4_SHIFT_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS) */
+#define OKL4_SHIFT_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS (0)
+/*lint -esym(621, OKL4_WIDTH_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS) */
+#define OKL4_WIDTH_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS (1)
+
+
+/*lint -sem(okl4_scheduler_virq_flags_getpowersuspended, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_scheduler_virq_flags_getpowersuspended) */
+/*lint -esym(714, okl4_scheduler_virq_flags_getpowersuspended) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_scheduler_virq_flags_getpowersuspended(const okl4_scheduler_virq_flags_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            _Bool field : 1;
+        } bits;
+        okl4_scheduler_virq_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_scheduler_virq_flags_setpowersuspended, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_scheduler_virq_flags_setpowersuspended) */
+
+/*lint -esym(621, okl4_scheduler_virq_flags_setpowersuspended) */
+OKL4_FORCE_INLINE void
+okl4_scheduler_virq_flags_setpowersuspended(okl4_scheduler_virq_flags_t *x, okl4_bool_t _power_suspended)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            _Bool field : 1;
+        } bits;
+        okl4_scheduler_virq_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_power_suspended;
+    *x = _conv.raw;
+}
+/*lint -esym(714, okl4_scheduler_virq_flags_init) */
+OKL4_FORCE_INLINE void
+okl4_scheduler_virq_flags_init(okl4_scheduler_virq_flags_t *x)
+{
+    *x = (okl4_scheduler_virq_flags_t)0U;
+}
+
+/*lint -esym(714, okl4_scheduler_virq_flags_cast) */
+OKL4_FORCE_INLINE okl4_scheduler_virq_flags_t
+okl4_scheduler_virq_flags_cast(uint64_t p, okl4_bool_t force)
+{
+    okl4_scheduler_virq_flags_t x = (okl4_scheduler_virq_flags_t)p;
+    (void)force;
+    return x;
+}
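+
+/*
+ * Illustrative sketch only (compiled out): testing the power-suspended flag
+ * in a scheduler VIRQ payload.  How the payload reaches the handler, and the
+ * handler name, are assumptions.
+ */
+#if 0
+static void
+example_handle_scheduler_virq(okl4_scheduler_virq_flags_t flags)
+{
+    if (okl4_scheduler_virq_flags_getpowersuspended(&flags)) {
+        /* A power-suspend event was signalled; react here (placeholder). */
+    }
+}
+#endif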
+
+
+
+/**
+    The `okl4_scount_t` type represents a natural number of items or
+    iterations. Negative values represent errors. Use `okl4_count_t` if error
+    values are not required.
+*/
+
+typedef int32_t okl4_scount_t;
+
+
+
+
+/**
+    The SDK_VERSION value carries the global, SDK-wide software version.
+
+    - BITS 5..0 -   @ref OKL4_MASK_MAINTENANCE_SDK_VERSION
+    - BITS 15..8 -   @ref OKL4_MASK_RELEASE_SDK_VERSION
+    - BITS 21..16 -   @ref OKL4_MASK_MINOR_SDK_VERSION
+    - BITS 27..24 -   @ref OKL4_MASK_MAJOR_SDK_VERSION
+    - BIT 28 -   @ref OKL4_MASK_RES0_FLAG_SDK_VERSION
+    - BIT 30 -   @ref OKL4_MASK_DEV_FLAG_SDK_VERSION
+    - BIT 31 -   @ref OKL4_MASK_FORMAT_FLAG_SDK_VERSION
+*/
+
+/*lint -esym(621, okl4_sdk_version_t) */
+typedef uint32_t okl4_sdk_version_t;
+
+/*lint -esym(621, okl4_sdk_version_getformatflag) */
+/*lint -esym(714, okl4_sdk_version_getformatflag) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getformatflag(const okl4_sdk_version_t *x);
+
+/*lint -esym(621, okl4_sdk_version_setformatflag) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setformatflag(okl4_sdk_version_t *x, uint32_t _format_flag);
+
+/*lint -esym(621, okl4_sdk_version_getdevflag) */
+/*lint -esym(714, okl4_sdk_version_getdevflag) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getdevflag(const okl4_sdk_version_t *x);
+
+/*lint -esym(621, okl4_sdk_version_setdevflag) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setdevflag(okl4_sdk_version_t *x, uint32_t _dev_flag);
+
+/*lint -esym(621, okl4_sdk_version_getres0flag) */
+/*lint -esym(714, okl4_sdk_version_getres0flag) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getres0flag(const okl4_sdk_version_t *x);
+
+/*lint -esym(621, okl4_sdk_version_setres0flag) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setres0flag(okl4_sdk_version_t *x, uint32_t _res0_flag);
+
+/*lint -esym(621, okl4_sdk_version_getmajor) */
+/*lint -esym(714, okl4_sdk_version_getmajor) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getmajor(const okl4_sdk_version_t *x);
+
+/*lint -esym(621, okl4_sdk_version_setmajor) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setmajor(okl4_sdk_version_t *x, uint32_t _major);
+
+/*lint -esym(621, okl4_sdk_version_getminor) */
+/*lint -esym(714, okl4_sdk_version_getminor) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getminor(const okl4_sdk_version_t *x);
+
+/*lint -esym(621, okl4_sdk_version_setminor) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setminor(okl4_sdk_version_t *x, uint32_t _minor);
+
+/*lint -esym(621, okl4_sdk_version_getrelease) */
+/*lint -esym(714, okl4_sdk_version_getrelease) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getrelease(const okl4_sdk_version_t *x);
+
+/*lint -esym(621, okl4_sdk_version_setrelease) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setrelease(okl4_sdk_version_t *x, uint32_t _release);
+
+/*lint -esym(621, okl4_sdk_version_getmaintenance) */
+/*lint -esym(714, okl4_sdk_version_getmaintenance) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getmaintenance(const okl4_sdk_version_t *x);
+
+/*lint -esym(621, okl4_sdk_version_setmaintenance) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setmaintenance(okl4_sdk_version_t *x, uint32_t _maintenance);
+
+/*lint -esym(714, okl4_sdk_version_init) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_init(okl4_sdk_version_t *x);
+
+/*lint -esym(714, okl4_sdk_version_cast) */
+OKL4_FORCE_INLINE okl4_sdk_version_t
+okl4_sdk_version_cast(uint32_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_SDK_VERSION_MAINTENANCE_MASK) */
+#define OKL4_SDK_VERSION_MAINTENANCE_MASK ((okl4_sdk_version_t)63U) /* Deprecated */
+/** Maintenance number */
+/*lint -esym(621, OKL4_MASK_MAINTENANCE_SDK_VERSION) */
+#define OKL4_MASK_MAINTENANCE_SDK_VERSION ((okl4_sdk_version_t)63U)
+/*lint -esym(621, OKL4_SHIFT_MAINTENANCE_SDK_VERSION) */
+#define OKL4_SHIFT_MAINTENANCE_SDK_VERSION (0)
+/*lint -esym(621, OKL4_WIDTH_MAINTENANCE_SDK_VERSION) */
+#define OKL4_WIDTH_MAINTENANCE_SDK_VERSION (6)
+/*lint -esym(621, OKL4_SDK_VERSION_RELEASE_MASK) */
+#define OKL4_SDK_VERSION_RELEASE_MASK ((okl4_sdk_version_t)255U << 8) /* Deprecated */
+/** SDK Release Number */
+/*lint -esym(621, OKL4_MASK_RELEASE_SDK_VERSION) */
+#define OKL4_MASK_RELEASE_SDK_VERSION ((okl4_sdk_version_t)255U << 8)
+/*lint -esym(621, OKL4_SHIFT_RELEASE_SDK_VERSION) */
+#define OKL4_SHIFT_RELEASE_SDK_VERSION (8)
+/*lint -esym(621, OKL4_WIDTH_RELEASE_SDK_VERSION) */
+#define OKL4_WIDTH_RELEASE_SDK_VERSION (8)
+/*lint -esym(621, OKL4_SDK_VERSION_MINOR_MASK) */
+#define OKL4_SDK_VERSION_MINOR_MASK ((okl4_sdk_version_t)63U << 16) /* Deprecated */
+/** SDK Minor Number */
+/*lint -esym(621, OKL4_MASK_MINOR_SDK_VERSION) */
+#define OKL4_MASK_MINOR_SDK_VERSION ((okl4_sdk_version_t)63U << 16)
+/*lint -esym(621, OKL4_SHIFT_MINOR_SDK_VERSION) */
+#define OKL4_SHIFT_MINOR_SDK_VERSION (16)
+/*lint -esym(621, OKL4_WIDTH_MINOR_SDK_VERSION) */
+#define OKL4_WIDTH_MINOR_SDK_VERSION (6)
+/*lint -esym(621, OKL4_SDK_VERSION_MAJOR_MASK) */
+#define OKL4_SDK_VERSION_MAJOR_MASK ((okl4_sdk_version_t)15U << 24) /* Deprecated */
+/** SDK Major Number */
+/*lint -esym(621, OKL4_MASK_MAJOR_SDK_VERSION) */
+#define OKL4_MASK_MAJOR_SDK_VERSION ((okl4_sdk_version_t)15U << 24)
+/*lint -esym(621, OKL4_SHIFT_MAJOR_SDK_VERSION) */
+#define OKL4_SHIFT_MAJOR_SDK_VERSION (24)
+/*lint -esym(621, OKL4_WIDTH_MAJOR_SDK_VERSION) */
+#define OKL4_WIDTH_MAJOR_SDK_VERSION (4)
+/*lint -esym(621, OKL4_SDK_VERSION_RES0_FLAG_MASK) */
+#define OKL4_SDK_VERSION_RES0_FLAG_MASK ((okl4_sdk_version_t)1U << 28) /* Deprecated */
+/** Reserved */
+/*lint -esym(621, OKL4_MASK_RES0_FLAG_SDK_VERSION) */
+#define OKL4_MASK_RES0_FLAG_SDK_VERSION ((okl4_sdk_version_t)1U << 28)
+/*lint -esym(621, OKL4_SHIFT_RES0_FLAG_SDK_VERSION) */
+#define OKL4_SHIFT_RES0_FLAG_SDK_VERSION (28)
+/*lint -esym(621, OKL4_WIDTH_RES0_FLAG_SDK_VERSION) */
+#define OKL4_WIDTH_RES0_FLAG_SDK_VERSION (1)
+/*lint -esym(621, OKL4_SDK_VERSION_DEV_FLAG_MASK) */
+#define OKL4_SDK_VERSION_DEV_FLAG_MASK ((okl4_sdk_version_t)1U << 30) /* Deprecated */
+/** Unreleased internal development version */
+/*lint -esym(621, OKL4_MASK_DEV_FLAG_SDK_VERSION) */
+#define OKL4_MASK_DEV_FLAG_SDK_VERSION ((okl4_sdk_version_t)1U << 30)
+/*lint -esym(621, OKL4_SHIFT_DEV_FLAG_SDK_VERSION) */
+#define OKL4_SHIFT_DEV_FLAG_SDK_VERSION (30)
+/*lint -esym(621, OKL4_WIDTH_DEV_FLAG_SDK_VERSION) */
+#define OKL4_WIDTH_DEV_FLAG_SDK_VERSION (1)
+/*lint -esym(621, OKL4_SDK_VERSION_FORMAT_FLAG_MASK) */
+#define OKL4_SDK_VERSION_FORMAT_FLAG_MASK ((okl4_sdk_version_t)1U << 31) /* Deprecated */
+/** Format: 0 = Version format 1, 1 = Reserved */
+/*lint -esym(621, OKL4_MASK_FORMAT_FLAG_SDK_VERSION) */
+#define OKL4_MASK_FORMAT_FLAG_SDK_VERSION ((okl4_sdk_version_t)1U << 31)
+/*lint -esym(621, OKL4_SHIFT_FORMAT_FLAG_SDK_VERSION) */
+#define OKL4_SHIFT_FORMAT_FLAG_SDK_VERSION (31)
+/*lint -esym(621, OKL4_WIDTH_FORMAT_FLAG_SDK_VERSION) */
+#define OKL4_WIDTH_FORMAT_FLAG_SDK_VERSION (1)
+
+
+/*lint -sem(okl4_sdk_version_getmaintenance, 1p, @n >= 0 && @n <= 63) */
+/*lint -esym(621, okl4_sdk_version_getmaintenance) */
+/*lint -esym(714, okl4_sdk_version_getmaintenance) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getmaintenance(const okl4_sdk_version_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 6;
+        } bits;
+        okl4_sdk_version_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_sdk_version_setmaintenance, 2n >= 0 && 2n <= 63) */
+/*lint -esym(714, okl4_sdk_version_setmaintenance) */
+
+/*lint -esym(621, okl4_sdk_version_setmaintenance) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setmaintenance(okl4_sdk_version_t *x, uint32_t _maintenance)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 6;
+        } bits;
+        okl4_sdk_version_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_maintenance;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_sdk_version_getrelease, 1p, @n >= 0 && @n <= 255) */
+/*lint -esym(621, okl4_sdk_version_getrelease) */
+/*lint -esym(714, okl4_sdk_version_getrelease) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getrelease(const okl4_sdk_version_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 8;
+            uint32_t field : 8;
+        } bits;
+        okl4_sdk_version_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_sdk_version_setrelease, 2n >= 0 && 2n <= 255) */
+/*lint -esym(714, okl4_sdk_version_setrelease) */
+
+/*lint -esym(621, okl4_sdk_version_setrelease) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setrelease(okl4_sdk_version_t *x, uint32_t _release)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 8;
+            uint32_t field : 8;
+        } bits;
+        okl4_sdk_version_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_release;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_sdk_version_getminor, 1p, @n >= 0 && @n <= 63) */
+/*lint -esym(621, okl4_sdk_version_getminor) */
+/*lint -esym(714, okl4_sdk_version_getminor) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getminor(const okl4_sdk_version_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 16;
+            uint32_t field : 6;
+        } bits;
+        okl4_sdk_version_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_sdk_version_setminor, 2n >= 0 && 2n <= 63) */
+/*lint -esym(714, okl4_sdk_version_setminor) */
+
+/*lint -esym(621, okl4_sdk_version_setminor) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setminor(okl4_sdk_version_t *x, uint32_t _minor)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 16;
+            uint32_t field : 6;
+        } bits;
+        okl4_sdk_version_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_minor;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_sdk_version_getmajor, 1p, @n >= 0 && @n <= 15) */
+/*lint -esym(621, okl4_sdk_version_getmajor) */
+/*lint -esym(714, okl4_sdk_version_getmajor) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getmajor(const okl4_sdk_version_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 24;
+            uint32_t field : 4;
+        } bits;
+        okl4_sdk_version_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_sdk_version_setmajor, 2n >= 0 && 2n <= 15) */
+/*lint -esym(714, okl4_sdk_version_setmajor) */
+
+/*lint -esym(621, okl4_sdk_version_setmajor) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setmajor(okl4_sdk_version_t *x, uint32_t _major)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 24;
+            uint32_t field : 4;
+        } bits;
+        okl4_sdk_version_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_major;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_sdk_version_getres0flag, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_sdk_version_getres0flag) */
+/*lint -esym(714, okl4_sdk_version_getres0flag) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getres0flag(const okl4_sdk_version_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 28;
+            uint32_t field : 1;
+        } bits;
+        okl4_sdk_version_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_sdk_version_setres0flag, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_sdk_version_setres0flag) */
+
+/*lint -esym(621, okl4_sdk_version_setres0flag) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setres0flag(okl4_sdk_version_t *x, uint32_t _res0_flag)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 28;
+            uint32_t field : 1;
+        } bits;
+        okl4_sdk_version_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_res0_flag;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_sdk_version_getdevflag, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_sdk_version_getdevflag) */
+/*lint -esym(714, okl4_sdk_version_getdevflag) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getdevflag(const okl4_sdk_version_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 30;
+            uint32_t field : 1;
+        } bits;
+        okl4_sdk_version_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_sdk_version_setdevflag, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_sdk_version_setdevflag) */
+
+/*lint -esym(621, okl4_sdk_version_setdevflag) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setdevflag(okl4_sdk_version_t *x, uint32_t _dev_flag)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 30;
+            uint32_t field : 1;
+        } bits;
+        okl4_sdk_version_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_dev_flag;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_sdk_version_getformatflag, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_sdk_version_getformatflag) */
+/*lint -esym(714, okl4_sdk_version_getformatflag) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getformatflag(const okl4_sdk_version_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 31;
+            uint32_t field : 1;
+        } bits;
+        okl4_sdk_version_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_sdk_version_setformatflag, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_sdk_version_setformatflag) */
+
+/*lint -esym(621, okl4_sdk_version_setformatflag) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setformatflag(okl4_sdk_version_t *x, uint32_t _format_flag)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 31;
+            uint32_t field : 1;
+        } bits;
+        okl4_sdk_version_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_format_flag;
+    *x = _conv.raw;
+}
+/*lint -esym(714, okl4_sdk_version_init) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_init(okl4_sdk_version_t *x)
+{
+    *x = (okl4_sdk_version_t)0U;
+}
+
+/*lint -esym(714, okl4_sdk_version_cast) */
+OKL4_FORCE_INLINE okl4_sdk_version_t
+okl4_sdk_version_cast(uint32_t p, okl4_bool_t force)
+{
+    okl4_sdk_version_t x = (okl4_sdk_version_t)p;
+    (void)force;
+    return x;
+}
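+
+/*
+ * Illustrative sketch only (compiled out): unpacking an SDK version word into
+ * its major/minor/release/maintenance components with the accessors above.
+ * The struct and function are hypothetical helpers, not part of the API.
+ */
+#if 0
+struct example_sdk_version_parts {
+    uint32_t major;
+    uint32_t minor;
+    uint32_t release;
+    uint32_t maintenance;
+    uint32_t is_dev_build;
+};
+
+static struct example_sdk_version_parts
+example_decode_sdk_version(okl4_sdk_version_t v)
+{
+    struct example_sdk_version_parts parts;
+
+    parts.major        = okl4_sdk_version_getmajor(&v);
+    parts.minor        = okl4_sdk_version_getminor(&v);
+    parts.release      = okl4_sdk_version_getrelease(&v);
+    parts.maintenance  = okl4_sdk_version_getmaintenance(&v);
+    parts.is_dev_build = okl4_sdk_version_getdevflag(&v);
+    return parts;
+}
+#endif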
+
+
+
+/**
+    Describes a single shared memory buffer: its physical base address, the
+    virtual memory item that maps it, and the capability used to refer to it.
+*/
+
+struct okl4_shared_buffer {
+    okl4_paddr_t physical_base;
+    struct okl4_virtmem_item virtmem_item;
+    okl4_kcap_t cap;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+};
+
+
+
+
+/**
+    An array of shared buffers, given as a pointer to the first
+    okl4_shared_buffer together with the number of buffers.
+*/
+
+struct okl4_shared_buffers_array {
+    __ptr64(struct okl4_shared_buffer *, buffers);
+    okl4_count_t num_buffers;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+};
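+
+/*
+ * Illustrative sketch only (compiled out): walking an
+ * okl4_shared_buffers_array to find the buffer exported with a given cap.
+ * The lookup semantics are an assumption, and the __ptr64() wrapper above is
+ * assumed to behave as a plain pointer here.
+ */
+#if 0
+static struct okl4_shared_buffer *
+example_find_buffer_by_cap(const struct okl4_shared_buffers_array *arr,
+                           okl4_kcap_t cap)
+{
+    okl4_count_t i;
+
+    for (i = 0; i < arr->num_buffers; i++) {
+        if (arr->buffers[i].cap == cap) {
+            return &arr->buffers[i];
+        }
+    }
+    return NULL;  /* no buffer exported with that cap */
+}
+#endif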
+
+
+
+
+
+typedef okl4_kcap_t okl4_signal_t;
+
+
+
+
+
+
+
+
+/**
+    The `okl4_sregister_t` type represents a signed, machine-native
+    register-sized integer value.
+*/
+
+typedef int64_t okl4_sregister_t;
+
+
+
+
+
+typedef uint64_t okl4_ticks_t;
+
+
+
+
+/**
+    - BIT 0 -   @ref OKL4_MASK_ACTIVE_TIMER_FLAGS
+    - BIT 1 -   @ref OKL4_MASK_PERIODIC_TIMER_FLAGS
+    - BIT 2 -   @ref OKL4_MASK_ABSOLUTE_TIMER_FLAGS
+    - BIT 3 -   @ref OKL4_MASK_UNITS_TIMER_FLAGS
+    - BIT 4 -   @ref OKL4_MASK_ALIGN_TIMER_FLAGS
+    - BIT 5 -   @ref OKL4_MASK_WATCHDOG_TIMER_FLAGS
+    - BIT 30 -   @ref OKL4_MASK_RELOAD_TIMER_FLAGS
+    - BIT 31 -   @ref OKL4_MASK_TIMESLICE_TIMER_FLAGS
+*/
+
+/*lint -esym(621, okl4_timer_flags_t) */
+typedef uint32_t okl4_timer_flags_t;
+
+/*lint -esym(621, okl4_timer_flags_getactive) */
+/*lint -esym(714, okl4_timer_flags_getactive) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getactive(const okl4_timer_flags_t *x);
+
+/*lint -esym(621, okl4_timer_flags_setactive) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setactive(okl4_timer_flags_t *x, okl4_bool_t _active);
+
+/*lint -esym(621, okl4_timer_flags_getperiodic) */
+/*lint -esym(714, okl4_timer_flags_getperiodic) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getperiodic(const okl4_timer_flags_t *x);
+
+/*lint -esym(621, okl4_timer_flags_setperiodic) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setperiodic(okl4_timer_flags_t *x, okl4_bool_t _periodic);
+
+/*lint -esym(621, okl4_timer_flags_getabsolute) */
+/*lint -esym(714, okl4_timer_flags_getabsolute) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getabsolute(const okl4_timer_flags_t *x);
+
+/*lint -esym(621, okl4_timer_flags_setabsolute) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setabsolute(okl4_timer_flags_t *x, okl4_bool_t _absolute);
+
+/*lint -esym(621, okl4_timer_flags_getunits) */
+/*lint -esym(714, okl4_timer_flags_getunits) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getunits(const okl4_timer_flags_t *x);
+
+/*lint -esym(621, okl4_timer_flags_setunits) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setunits(okl4_timer_flags_t *x, okl4_bool_t _units);
+
+/*lint -esym(621, okl4_timer_flags_getalign) */
+/*lint -esym(714, okl4_timer_flags_getalign) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getalign(const okl4_timer_flags_t *x);
+
+/*lint -esym(621, okl4_timer_flags_setalign) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setalign(okl4_timer_flags_t *x, okl4_bool_t _align);
+
+/*lint -esym(621, okl4_timer_flags_getwatchdog) */
+/*lint -esym(714, okl4_timer_flags_getwatchdog) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getwatchdog(const okl4_timer_flags_t *x);
+
+/*lint -esym(621, okl4_timer_flags_setwatchdog) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setwatchdog(okl4_timer_flags_t *x, okl4_bool_t _watchdog);
+
+/*lint -esym(621, okl4_timer_flags_getreload) */
+/*lint -esym(714, okl4_timer_flags_getreload) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getreload(const okl4_timer_flags_t *x);
+
+/*lint -esym(621, okl4_timer_flags_setreload) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setreload(okl4_timer_flags_t *x, okl4_bool_t _reload);
+
+/*lint -esym(621, okl4_timer_flags_gettimeslice) */
+/*lint -esym(714, okl4_timer_flags_gettimeslice) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_gettimeslice(const okl4_timer_flags_t *x);
+
+/*lint -esym(621, okl4_timer_flags_settimeslice) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_settimeslice(okl4_timer_flags_t *x, okl4_bool_t _timeslice);
+
+/*lint -esym(714, okl4_timer_flags_init) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_init(okl4_timer_flags_t *x);
+
+/*lint -esym(714, okl4_timer_flags_cast) */
+OKL4_FORCE_INLINE okl4_timer_flags_t
+okl4_timer_flags_cast(uint32_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_TIMER_FLAGS_ACTIVE_MASK) */
+#define OKL4_TIMER_FLAGS_ACTIVE_MASK ((okl4_timer_flags_t)1U) /* Deprecated */
+/** Indicates that the timer has a timeout set */
+/*lint -esym(621, OKL4_MASK_ACTIVE_TIMER_FLAGS) */
+#define OKL4_MASK_ACTIVE_TIMER_FLAGS ((okl4_timer_flags_t)1U)
+/*lint -esym(621, OKL4_SHIFT_ACTIVE_TIMER_FLAGS) */
+#define OKL4_SHIFT_ACTIVE_TIMER_FLAGS (0)
+/*lint -esym(621, OKL4_WIDTH_ACTIVE_TIMER_FLAGS) */
+#define OKL4_WIDTH_ACTIVE_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_TIMER_FLAGS_PERIODIC_MASK) */
+#define OKL4_TIMER_FLAGS_PERIODIC_MASK ((okl4_timer_flags_t)1U << 1) /* Deprecated */
+/** Indicates that the timer is periodic, otherwise it is one-shot */
+/*lint -esym(621, OKL4_MASK_PERIODIC_TIMER_FLAGS) */
+#define OKL4_MASK_PERIODIC_TIMER_FLAGS ((okl4_timer_flags_t)1U << 1)
+/*lint -esym(621, OKL4_SHIFT_PERIODIC_TIMER_FLAGS) */
+#define OKL4_SHIFT_PERIODIC_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_WIDTH_PERIODIC_TIMER_FLAGS) */
+#define OKL4_WIDTH_PERIODIC_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_TIMER_FLAGS_ABSOLUTE_MASK) */
+#define OKL4_TIMER_FLAGS_ABSOLUTE_MASK ((okl4_timer_flags_t)1U << 2) /* Deprecated */
+/** Indicates that the timeout value is absolute, otherwise it is relative */
+/*lint -esym(621, OKL4_MASK_ABSOLUTE_TIMER_FLAGS) */
+#define OKL4_MASK_ABSOLUTE_TIMER_FLAGS ((okl4_timer_flags_t)1U << 2)
+/*lint -esym(621, OKL4_SHIFT_ABSOLUTE_TIMER_FLAGS) */
+#define OKL4_SHIFT_ABSOLUTE_TIMER_FLAGS (2)
+/*lint -esym(621, OKL4_WIDTH_ABSOLUTE_TIMER_FLAGS) */
+#define OKL4_WIDTH_ABSOLUTE_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_TIMER_FLAGS_UNITS_MASK) */
+#define OKL4_TIMER_FLAGS_UNITS_MASK ((okl4_timer_flags_t)1U << 3) /* Deprecated */
+/** Select time in UNITS of raw ticks */
+/*lint -esym(621, OKL4_MASK_UNITS_TIMER_FLAGS) */
+#define OKL4_MASK_UNITS_TIMER_FLAGS ((okl4_timer_flags_t)1U << 3)
+/*lint -esym(621, OKL4_SHIFT_UNITS_TIMER_FLAGS) */
+#define OKL4_SHIFT_UNITS_TIMER_FLAGS (3)
+/*lint -esym(621, OKL4_WIDTH_UNITS_TIMER_FLAGS) */
+#define OKL4_WIDTH_UNITS_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_TIMER_FLAGS_ALIGN_MASK) */
+#define OKL4_TIMER_FLAGS_ALIGN_MASK ((okl4_timer_flags_t)1U << 4) /* Deprecated */
+/** Align first timeout of a periodic timer to a multiple of the timeout length */
+/*lint -esym(621, OKL4_MASK_ALIGN_TIMER_FLAGS) */
+#define OKL4_MASK_ALIGN_TIMER_FLAGS ((okl4_timer_flags_t)1U << 4)
+/*lint -esym(621, OKL4_SHIFT_ALIGN_TIMER_FLAGS) */
+#define OKL4_SHIFT_ALIGN_TIMER_FLAGS (4)
+/*lint -esym(621, OKL4_WIDTH_ALIGN_TIMER_FLAGS) */
+#define OKL4_WIDTH_ALIGN_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_TIMER_FLAGS_WATCHDOG_MASK) */
+#define OKL4_TIMER_FLAGS_WATCHDOG_MASK ((okl4_timer_flags_t)1U << 5) /* Deprecated */
+/** Enter the kernel interactive debugger on timer expiry (no effect for production builds of the kernel) */
+/*lint -esym(621, OKL4_MASK_WATCHDOG_TIMER_FLAGS) */
+#define OKL4_MASK_WATCHDOG_TIMER_FLAGS ((okl4_timer_flags_t)1U << 5)
+/*lint -esym(621, OKL4_SHIFT_WATCHDOG_TIMER_FLAGS) */
+#define OKL4_SHIFT_WATCHDOG_TIMER_FLAGS (5)
+/*lint -esym(621, OKL4_WIDTH_WATCHDOG_TIMER_FLAGS) */
+#define OKL4_WIDTH_WATCHDOG_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_TIMER_FLAGS_RELOAD_MASK) */
+#define OKL4_TIMER_FLAGS_RELOAD_MASK ((okl4_timer_flags_t)1U << 30) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_RELOAD_TIMER_FLAGS) */
+#define OKL4_MASK_RELOAD_TIMER_FLAGS ((okl4_timer_flags_t)1U << 30)
+/*lint -esym(621, OKL4_SHIFT_RELOAD_TIMER_FLAGS) */
+#define OKL4_SHIFT_RELOAD_TIMER_FLAGS (30)
+/*lint -esym(621, OKL4_WIDTH_RELOAD_TIMER_FLAGS) */
+#define OKL4_WIDTH_RELOAD_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_TIMER_FLAGS_TIMESLICE_MASK) */
+#define OKL4_TIMER_FLAGS_TIMESLICE_MASK ((okl4_timer_flags_t)1U << 31) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_TIMESLICE_TIMER_FLAGS) */
+#define OKL4_MASK_TIMESLICE_TIMER_FLAGS ((okl4_timer_flags_t)1U << 31)
+/*lint -esym(621, OKL4_SHIFT_TIMESLICE_TIMER_FLAGS) */
+#define OKL4_SHIFT_TIMESLICE_TIMER_FLAGS (31)
+/*lint -esym(621, OKL4_WIDTH_TIMESLICE_TIMER_FLAGS) */
+#define OKL4_WIDTH_TIMESLICE_TIMER_FLAGS (1)
+
+
+/*lint -sem(okl4_timer_flags_getactive, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_timer_flags_getactive) */
+/*lint -esym(714, okl4_timer_flags_getactive) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getactive(const okl4_timer_flags_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            _Bool field : 1;
+        } bits;
+        okl4_timer_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_timer_flags_setactive, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_timer_flags_setactive) */
+
+/*lint -esym(621, okl4_timer_flags_setactive) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setactive(okl4_timer_flags_t *x, okl4_bool_t _active)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            _Bool field : 1;
+        } bits;
+        okl4_timer_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_active;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_timer_flags_getperiodic, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_timer_flags_getperiodic) */
+/*lint -esym(714, okl4_timer_flags_getperiodic) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getperiodic(const okl4_timer_flags_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 1;
+            _Bool field : 1;
+        } bits;
+        okl4_timer_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_timer_flags_setperiodic, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_timer_flags_setperiodic) */
+
+/*lint -esym(621, okl4_timer_flags_setperiodic) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setperiodic(okl4_timer_flags_t *x, okl4_bool_t _periodic)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 1;
+            _Bool field : 1;
+        } bits;
+        okl4_timer_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_periodic;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_timer_flags_getabsolute, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_timer_flags_getabsolute) */
+/*lint -esym(714, okl4_timer_flags_getabsolute) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getabsolute(const okl4_timer_flags_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 2;
+            _Bool field : 1;
+        } bits;
+        okl4_timer_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_timer_flags_setabsolute, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_timer_flags_setabsolute) */
+
+/*lint -esym(621, okl4_timer_flags_setabsolute) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setabsolute(okl4_timer_flags_t *x, okl4_bool_t _absolute)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 2;
+            _Bool field : 1;
+        } bits;
+        okl4_timer_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_absolute;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_timer_flags_getunits, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_timer_flags_getunits) */
+/*lint -esym(714, okl4_timer_flags_getunits) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getunits(const okl4_timer_flags_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 3;
+            _Bool field : 1;
+        } bits;
+        okl4_timer_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_timer_flags_setunits, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_timer_flags_setunits) */
+
+/*lint -esym(621, okl4_timer_flags_setunits) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setunits(okl4_timer_flags_t *x, okl4_bool_t _units)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 3;
+            _Bool field : 1;
+        } bits;
+        okl4_timer_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_units;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_timer_flags_getalign, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_timer_flags_getalign) */
+/*lint -esym(714, okl4_timer_flags_getalign) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getalign(const okl4_timer_flags_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 4;
+            _Bool field : 1;
+        } bits;
+        okl4_timer_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_timer_flags_setalign, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_timer_flags_setalign) */
+
+/*lint -esym(621, okl4_timer_flags_setalign) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setalign(okl4_timer_flags_t *x, okl4_bool_t _align)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 4;
+            _Bool field : 1;
+        } bits;
+        okl4_timer_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_align;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_timer_flags_getwatchdog, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_timer_flags_getwatchdog) */
+/*lint -esym(714, okl4_timer_flags_getwatchdog) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getwatchdog(const okl4_timer_flags_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 5;
+            _Bool field : 1;
+        } bits;
+        okl4_timer_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_timer_flags_setwatchdog, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_timer_flags_setwatchdog) */
+
+/*lint -esym(621, okl4_timer_flags_setwatchdog) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setwatchdog(okl4_timer_flags_t *x, okl4_bool_t _watchdog)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 5;
+            _Bool field : 1;
+        } bits;
+        okl4_timer_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_watchdog;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_timer_flags_getreload, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_timer_flags_getreload) */
+/*lint -esym(714, okl4_timer_flags_getreload) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getreload(const okl4_timer_flags_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 30;
+            _Bool field : 1;
+        } bits;
+        okl4_timer_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_timer_flags_setreload, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_timer_flags_setreload) */
+
+/*lint -esym(621, okl4_timer_flags_setreload) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setreload(okl4_timer_flags_t *x, okl4_bool_t _reload)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 30;
+            _Bool field : 1;
+        } bits;
+        okl4_timer_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_reload;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_timer_flags_gettimeslice, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_timer_flags_gettimeslice) */
+/*lint -esym(714, okl4_timer_flags_gettimeslice) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_gettimeslice(const okl4_timer_flags_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 31;
+            _Bool field : 1;
+        } bits;
+        okl4_timer_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_timer_flags_settimeslice, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_timer_flags_settimeslice) */
+
+/*lint -esym(621, okl4_timer_flags_settimeslice) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_settimeslice(okl4_timer_flags_t *x, okl4_bool_t _timeslice)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 31;
+            _Bool field : 1;
+        } bits;
+        okl4_timer_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_timeslice;
+    *x = _conv.raw;
+}
+/*lint -esym(714, okl4_timer_flags_init) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_init(okl4_timer_flags_t *x)
+{
+    *x = (okl4_timer_flags_t)0U;
+}
+
+/*lint -esym(714, okl4_timer_flags_cast) */
+OKL4_FORCE_INLINE okl4_timer_flags_t
+okl4_timer_flags_cast(uint32_t p, okl4_bool_t force)
+{
+    okl4_timer_flags_t x = (okl4_timer_flags_t)p;
+    (void)force;
+    return x;
+}
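+
+/*
+ * Illustrative usage sketch (editorial addition, not part of the generated
+ * OKL4 API): composing a timer flags word with the accessors defined above.
+ * The helper name below is hypothetical.
+ */
+OKL4_FORCE_INLINE okl4_timer_flags_t
+okl4_example_timer_flags_periodic_absolute(void)
+{
+    okl4_timer_flags_t flags;
+
+    okl4_timer_flags_init(&flags);                          /* start from all-clear */
+    okl4_timer_flags_setperiodic(&flags, (okl4_bool_t)1);   /* periodic, not one-shot */
+    okl4_timer_flags_setabsolute(&flags, (okl4_bool_t)1);   /* timeout is absolute, not relative */
+    return flags;
+}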
+
+
+
+
+struct _okl4_tracebuffer_buffer_header {
+    okl4_soc_time_t timestamp;
+    okl4_count_t wrap;
+    _okl4_padding_t __padding0_4; /**< Padding 8 */
+    _okl4_padding_t __padding1_5; /**< Padding 8 */
+    _okl4_padding_t __padding2_6; /**< Padding 8 */
+    _okl4_padding_t __padding3_7; /**< Padding 8 */
+    okl4_ksize_t size;
+    okl4_ksize_t head;
+    okl4_ksize_t offset;
+};
+
+
+
+
+
+
+/**
+    The okl4_tracebuffer_env structure describes the environment entry for
+    the kernel trace buffer: the virtual memory region that contains the
+    buffer and the virtual interrupt used to signal trace-buffer activity.
+*/
+
+struct okl4_tracebuffer_env {
+    struct okl4_virtmem_item virt;
+    okl4_interrupt_number_t virq;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+};
+
+
+
+
+
+struct _okl4_tracebuffer_header {
+    uint32_t magic;
+    uint32_t version;
+    uint32_t id;
+    okl4_count_t num_buffers;
+    okl4_ksize_t buffer_size;
+    okl4_atomic_uint32_t log_mask;
+    okl4_atomic_uint32_t active_buffer;
+    okl4_atomic_uint32_t grabbed_buffer;
+    okl4_atomic_uint32_t empty_buffers;
+    struct _okl4_tracebuffer_buffer_header buffers[]; /*lint --e{9038} flex array */
+};
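+
+/*
+ * Illustrative sizing sketch (editorial addition, not part of the generated
+ * OKL4 API): because buffers[] is a flexible array member, the space needed
+ * for a header describing n buffers is the base structure size plus n
+ * per-buffer headers. The helper name is hypothetical.
+ */
+OKL4_FORCE_INLINE okl4_ksize_t
+okl4_example_tracebuffer_header_size(okl4_count_t n)
+{
+    return (okl4_ksize_t)sizeof(struct _okl4_tracebuffer_header) +
+            ((okl4_ksize_t)n *
+             (okl4_ksize_t)sizeof(struct _okl4_tracebuffer_buffer_header));
+}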
+
+
+
+
+
+
+
+typedef uint32_t okl4_tracepoint_class_t;
+
+/*lint -esym(621, OKL4_TRACEPOINT_CLASS_THREAD_STATE) */
+#define OKL4_TRACEPOINT_CLASS_THREAD_STATE ((okl4_tracepoint_class_t)0x0U)
+/*lint -esym(621, OKL4_TRACEPOINT_CLASS_SYSCALLS) */
+#define OKL4_TRACEPOINT_CLASS_SYSCALLS ((okl4_tracepoint_class_t)0x1U)
+/*lint -esym(621, OKL4_TRACEPOINT_CLASS_PRIMARY) */
+#define OKL4_TRACEPOINT_CLASS_PRIMARY ((okl4_tracepoint_class_t)0x2U)
+/*lint -esym(621, OKL4_TRACEPOINT_CLASS_SECONDARY) */
+#define OKL4_TRACEPOINT_CLASS_SECONDARY ((okl4_tracepoint_class_t)0x3U)
+/*lint -esym(621, OKL4_TRACEPOINT_CLASS_TERTIARY) */
+#define OKL4_TRACEPOINT_CLASS_TERTIARY ((okl4_tracepoint_class_t)0x4U)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_TRACEPOINT_CLASS_MAX) */
+#define OKL4_TRACEPOINT_CLASS_MAX ((okl4_tracepoint_class_t)0x4U)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_TRACEPOINT_CLASS_INVALID) */
+#define OKL4_TRACEPOINT_CLASS_INVALID ((okl4_tracepoint_class_t)0xffffffffU)
+
+/*lint -esym(714, okl4_tracepoint_class_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_tracepoint_class_is_element_of(okl4_tracepoint_class_t var);
+
+
+/*lint -esym(714, okl4_tracepoint_class_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_tracepoint_class_is_element_of(okl4_tracepoint_class_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((var == OKL4_TRACEPOINT_CLASS_THREAD_STATE) ||
+            (var == OKL4_TRACEPOINT_CLASS_SYSCALLS) ||
+            (var == OKL4_TRACEPOINT_CLASS_PRIMARY) ||
+            (var == OKL4_TRACEPOINT_CLASS_SECONDARY) ||
+            (var == OKL4_TRACEPOINT_CLASS_TERTIARY));
+}
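+
+/*
+ * Illustrative validation sketch (editorial addition, not part of the
+ * generated OKL4 API): a raw 32-bit value can be checked with the membership
+ * helper above before being treated as a tracepoint class; anything outside
+ * the enumeration maps to the INVALID sentinel. The helper name is
+ * hypothetical.
+ */
+OKL4_FORCE_INLINE okl4_tracepoint_class_t
+okl4_example_tracepoint_class_from_raw(uint32_t raw)
+{
+    okl4_tracepoint_class_t cls = (okl4_tracepoint_class_t)raw;
+
+    return okl4_tracepoint_class_is_element_of(cls) ?
+            cls : OKL4_TRACEPOINT_CLASS_INVALID;
+}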
+
+
+/**
+    - BITS 7..0 -   @ref _OKL4_MASK_ID_TRACEPOINT_DESC
+    - BIT 8 -   @ref _OKL4_MASK_USER_TRACEPOINT_DESC
+    - BIT 9 -   @ref _OKL4_MASK_BIN_TRACEPOINT_DESC
+    - BITS 15..10 -   @ref _OKL4_MASK_RECLEN_TRACEPOINT_DESC
+    - BITS 21..16 -   @ref _OKL4_MASK_CPUID_TRACEPOINT_DESC
+    - BITS 27..22 -   @ref _OKL4_MASK_THREADID_TRACEPOINT_DESC
+    - BITS 31..28 -   @ref _OKL4_MASK__R1_TRACEPOINT_DESC
+*/
+
+/*lint -esym(621, _okl4_tracepoint_desc_t) */
+typedef uint32_t _okl4_tracepoint_desc_t;
+
+/*lint -esym(621, _okl4_tracepoint_desc_getid) */
+/*lint -esym(714, _okl4_tracepoint_desc_getid) */
+OKL4_FORCE_INLINE uint32_t
+_okl4_tracepoint_desc_getid(const _okl4_tracepoint_desc_t *x);
+
+/*lint -esym(621, _okl4_tracepoint_desc_setid) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setid(_okl4_tracepoint_desc_t *x, uint32_t _id);
+
+/*lint -esym(621, _okl4_tracepoint_desc_getuser) */
+/*lint -esym(714, _okl4_tracepoint_desc_getuser) */
+OKL4_FORCE_INLINE okl4_bool_t
+_okl4_tracepoint_desc_getuser(const _okl4_tracepoint_desc_t *x);
+
+/*lint -esym(621, _okl4_tracepoint_desc_setuser) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setuser(_okl4_tracepoint_desc_t *x, okl4_bool_t _user);
+
+/*lint -esym(621, _okl4_tracepoint_desc_getbin) */
+/*lint -esym(714, _okl4_tracepoint_desc_getbin) */
+OKL4_FORCE_INLINE okl4_bool_t
+_okl4_tracepoint_desc_getbin(const _okl4_tracepoint_desc_t *x);
+
+/*lint -esym(621, _okl4_tracepoint_desc_setbin) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setbin(_okl4_tracepoint_desc_t *x, okl4_bool_t _bin);
+
+/*lint -esym(621, _okl4_tracepoint_desc_getreclen) */
+/*lint -esym(714, _okl4_tracepoint_desc_getreclen) */
+OKL4_FORCE_INLINE uint32_t
+_okl4_tracepoint_desc_getreclen(const _okl4_tracepoint_desc_t *x);
+
+/*lint -esym(621, _okl4_tracepoint_desc_setreclen) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setreclen(_okl4_tracepoint_desc_t *x, uint32_t _reclen);
+
+/*lint -esym(621, _okl4_tracepoint_desc_getcpuid) */
+/*lint -esym(714, _okl4_tracepoint_desc_getcpuid) */
+OKL4_FORCE_INLINE okl4_count_t
+_okl4_tracepoint_desc_getcpuid(const _okl4_tracepoint_desc_t *x);
+
+/*lint -esym(621, _okl4_tracepoint_desc_setcpuid) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setcpuid(_okl4_tracepoint_desc_t *x, okl4_count_t _cpuid);
+
+/*lint -esym(621, _okl4_tracepoint_desc_getthreadid) */
+/*lint -esym(714, _okl4_tracepoint_desc_getthreadid) */
+OKL4_FORCE_INLINE uint32_t
+_okl4_tracepoint_desc_getthreadid(const _okl4_tracepoint_desc_t *x);
+
+/*lint -esym(621, _okl4_tracepoint_desc_setthreadid) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setthreadid(_okl4_tracepoint_desc_t *x, uint32_t _threadid);
+
+/*lint -esym(621, _okl4_tracepoint_desc_getr1) */
+/*lint -esym(714, _okl4_tracepoint_desc_getr1) */
+OKL4_FORCE_INLINE uint32_t
+_okl4_tracepoint_desc_getr1(const _okl4_tracepoint_desc_t *x);
+
+/*lint -esym(621, _okl4_tracepoint_desc_setr1) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setr1(_okl4_tracepoint_desc_t *x, uint32_t __r1);
+
+/*lint -esym(714, _okl4_tracepoint_desc_init) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_init(_okl4_tracepoint_desc_t *x);
+
+/*lint -esym(714, _okl4_tracepoint_desc_cast) */
+OKL4_FORCE_INLINE _okl4_tracepoint_desc_t
+_okl4_tracepoint_desc_cast(uint32_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, _OKL4_TRACEPOINT_DESC_ID_MASK) */
+#define _OKL4_TRACEPOINT_DESC_ID_MASK ((_okl4_tracepoint_desc_t)255U) /* Deprecated */
+/*lint -esym(621, _OKL4_MASK_ID_TRACEPOINT_DESC) */
+#define _OKL4_MASK_ID_TRACEPOINT_DESC ((_okl4_tracepoint_desc_t)255U)
+/*lint -esym(621, _OKL4_SHIFT_ID_TRACEPOINT_DESC) */
+#define _OKL4_SHIFT_ID_TRACEPOINT_DESC (0)
+/*lint -esym(621, _OKL4_WIDTH_ID_TRACEPOINT_DESC) */
+#define _OKL4_WIDTH_ID_TRACEPOINT_DESC (8)
+/*lint -esym(621, _OKL4_TRACEPOINT_DESC_USER_MASK) */
+#define _OKL4_TRACEPOINT_DESC_USER_MASK ((_okl4_tracepoint_desc_t)1U << 8) /* Deprecated */
+/*lint -esym(621, _OKL4_MASK_USER_TRACEPOINT_DESC) */
+#define _OKL4_MASK_USER_TRACEPOINT_DESC ((_okl4_tracepoint_desc_t)1U << 8)
+/*lint -esym(621, _OKL4_SHIFT_USER_TRACEPOINT_DESC) */
+#define _OKL4_SHIFT_USER_TRACEPOINT_DESC (8)
+/*lint -esym(621, _OKL4_WIDTH_USER_TRACEPOINT_DESC) */
+#define _OKL4_WIDTH_USER_TRACEPOINT_DESC (1)
+/*lint -esym(621, _OKL4_TRACEPOINT_DESC_BIN_MASK) */
+#define _OKL4_TRACEPOINT_DESC_BIN_MASK ((_okl4_tracepoint_desc_t)1U << 9) /* Deprecated */
+/*lint -esym(621, _OKL4_MASK_BIN_TRACEPOINT_DESC) */
+#define _OKL4_MASK_BIN_TRACEPOINT_DESC ((_okl4_tracepoint_desc_t)1U << 9)
+/*lint -esym(621, _OKL4_SHIFT_BIN_TRACEPOINT_DESC) */
+#define _OKL4_SHIFT_BIN_TRACEPOINT_DESC (9)
+/*lint -esym(621, _OKL4_WIDTH_BIN_TRACEPOINT_DESC) */
+#define _OKL4_WIDTH_BIN_TRACEPOINT_DESC (1)
+/*lint -esym(621, _OKL4_TRACEPOINT_DESC_RECLEN_MASK) */
+#define _OKL4_TRACEPOINT_DESC_RECLEN_MASK ((_okl4_tracepoint_desc_t)63U << 10) /* Deprecated */
+/*lint -esym(621, _OKL4_MASK_RECLEN_TRACEPOINT_DESC) */
+#define _OKL4_MASK_RECLEN_TRACEPOINT_DESC ((_okl4_tracepoint_desc_t)63U << 10)
+/*lint -esym(621, _OKL4_SHIFT_RECLEN_TRACEPOINT_DESC) */
+#define _OKL4_SHIFT_RECLEN_TRACEPOINT_DESC (10)
+/*lint -esym(621, _OKL4_WIDTH_RECLEN_TRACEPOINT_DESC) */
+#define _OKL4_WIDTH_RECLEN_TRACEPOINT_DESC (6)
+/*lint -esym(621, _OKL4_TRACEPOINT_DESC_CPUID_MASK) */
+#define _OKL4_TRACEPOINT_DESC_CPUID_MASK ((_okl4_tracepoint_desc_t)63U << 16) /* Deprecated */
+/*lint -esym(621, _OKL4_MASK_CPUID_TRACEPOINT_DESC) */
+#define _OKL4_MASK_CPUID_TRACEPOINT_DESC ((_okl4_tracepoint_desc_t)63U << 16)
+/*lint -esym(621, _OKL4_SHIFT_CPUID_TRACEPOINT_DESC) */
+#define _OKL4_SHIFT_CPUID_TRACEPOINT_DESC (16)
+/*lint -esym(621, _OKL4_WIDTH_CPUID_TRACEPOINT_DESC) */
+#define _OKL4_WIDTH_CPUID_TRACEPOINT_DESC (6)
+/*lint -esym(621, _OKL4_TRACEPOINT_DESC_THREADID_MASK) */
+#define _OKL4_TRACEPOINT_DESC_THREADID_MASK ((_okl4_tracepoint_desc_t)63U << 22) /* Deprecated */
+/*lint -esym(621, _OKL4_MASK_THREADID_TRACEPOINT_DESC) */
+#define _OKL4_MASK_THREADID_TRACEPOINT_DESC ((_okl4_tracepoint_desc_t)63U << 22)
+/*lint -esym(621, _OKL4_SHIFT_THREADID_TRACEPOINT_DESC) */
+#define _OKL4_SHIFT_THREADID_TRACEPOINT_DESC (22)
+/*lint -esym(621, _OKL4_WIDTH_THREADID_TRACEPOINT_DESC) */
+#define _OKL4_WIDTH_THREADID_TRACEPOINT_DESC (6)
+/*lint -esym(621, _OKL4_TRACEPOINT_DESC__R1_MASK) */
+#define _OKL4_TRACEPOINT_DESC__R1_MASK ((_okl4_tracepoint_desc_t)15U << 28) /* Deprecated */
+/*lint -esym(621, _OKL4_MASK__R1_TRACEPOINT_DESC) */
+#define _OKL4_MASK__R1_TRACEPOINT_DESC ((_okl4_tracepoint_desc_t)15U << 28)
+/*lint -esym(621, _OKL4_SHIFT__R1_TRACEPOINT_DESC) */
+#define _OKL4_SHIFT__R1_TRACEPOINT_DESC (28)
+/*lint -esym(621, _OKL4_WIDTH__R1_TRACEPOINT_DESC) */
+#define _OKL4_WIDTH__R1_TRACEPOINT_DESC (4)
+
+
+/*lint -sem(_okl4_tracepoint_desc_getid, 1p, @n >= 0 && @n <= 255) */
+/*lint -esym(621, _okl4_tracepoint_desc_getid) */
+/*lint -esym(714, _okl4_tracepoint_desc_getid) */
+OKL4_FORCE_INLINE uint32_t
+_okl4_tracepoint_desc_getid(const _okl4_tracepoint_desc_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 8;
+        } bits;
+        _okl4_tracepoint_desc_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(_okl4_tracepoint_desc_setid, 2n >= 0 && 2n <= 255) */
+/*lint -esym(714, _okl4_tracepoint_desc_setid) */
+
+/*lint -esym(621, _okl4_tracepoint_desc_setid) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setid(_okl4_tracepoint_desc_t *x, uint32_t _id)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 8;
+        } bits;
+        _okl4_tracepoint_desc_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_id;
+    *x = _conv.raw;
+}
+/*lint -sem(_okl4_tracepoint_desc_getuser, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, _okl4_tracepoint_desc_getuser) */
+/*lint -esym(714, _okl4_tracepoint_desc_getuser) */
+OKL4_FORCE_INLINE okl4_bool_t
+_okl4_tracepoint_desc_getuser(const _okl4_tracepoint_desc_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 8;
+            _Bool field : 1;
+        } bits;
+        _okl4_tracepoint_desc_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(_okl4_tracepoint_desc_setuser, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, _okl4_tracepoint_desc_setuser) */
+
+/*lint -esym(621, _okl4_tracepoint_desc_setuser) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setuser(_okl4_tracepoint_desc_t *x, okl4_bool_t _user)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 8;
+            _Bool field : 1;
+        } bits;
+        _okl4_tracepoint_desc_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_user;
+    *x = _conv.raw;
+}
+/*lint -sem(_okl4_tracepoint_desc_getbin, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, _okl4_tracepoint_desc_getbin) */
+/*lint -esym(714, _okl4_tracepoint_desc_getbin) */
+OKL4_FORCE_INLINE okl4_bool_t
+_okl4_tracepoint_desc_getbin(const _okl4_tracepoint_desc_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 9;
+            _Bool field : 1;
+        } bits;
+        _okl4_tracepoint_desc_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(_okl4_tracepoint_desc_setbin, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, _okl4_tracepoint_desc_setbin) */
+
+/*lint -esym(621, _okl4_tracepoint_desc_setbin) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setbin(_okl4_tracepoint_desc_t *x, okl4_bool_t _bin)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 9;
+            _Bool field : 1;
+        } bits;
+        _okl4_tracepoint_desc_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_bin;
+    *x = _conv.raw;
+}
+/*lint -sem(_okl4_tracepoint_desc_getreclen, 1p, @n >= 0 && @n <= 63) */
+/*lint -esym(621, _okl4_tracepoint_desc_getreclen) */
+/*lint -esym(714, _okl4_tracepoint_desc_getreclen) */
+OKL4_FORCE_INLINE uint32_t
+_okl4_tracepoint_desc_getreclen(const _okl4_tracepoint_desc_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 10;
+            uint32_t field : 6;
+        } bits;
+        _okl4_tracepoint_desc_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(_okl4_tracepoint_desc_setreclen, 2n >= 0 && 2n <= 63) */
+/*lint -esym(714, _okl4_tracepoint_desc_setreclen) */
+
+/*lint -esym(621, _okl4_tracepoint_desc_setreclen) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setreclen(_okl4_tracepoint_desc_t *x, uint32_t _reclen)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 10;
+            uint32_t field : 6;
+        } bits;
+        _okl4_tracepoint_desc_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_reclen;
+    *x = _conv.raw;
+}
+/*lint -sem(_okl4_tracepoint_desc_getcpuid, 1p, @n >= 0 && @n <= 63) */
+/*lint -esym(621, _okl4_tracepoint_desc_getcpuid) */
+/*lint -esym(714, _okl4_tracepoint_desc_getcpuid) */
+OKL4_FORCE_INLINE okl4_count_t
+_okl4_tracepoint_desc_getcpuid(const _okl4_tracepoint_desc_t *x)
+{
+    okl4_count_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 16;
+            uint32_t field : 6;
+        } bits;
+        _okl4_tracepoint_desc_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_count_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(_okl4_tracepoint_desc_setcpuid, 2n >= 0 && 2n <= 63) */
+/*lint -esym(714, _okl4_tracepoint_desc_setcpuid) */
+
+/*lint -esym(621, _okl4_tracepoint_desc_setcpuid) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setcpuid(_okl4_tracepoint_desc_t *x, okl4_count_t _cpuid)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 16;
+            uint32_t field : 6;
+        } bits;
+        _okl4_tracepoint_desc_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_cpuid;
+    *x = _conv.raw;
+}
+/*lint -sem(_okl4_tracepoint_desc_getthreadid, 1p, @n >= 0 && @n <= 63) */
+/*lint -esym(621, _okl4_tracepoint_desc_getthreadid) */
+/*lint -esym(714, _okl4_tracepoint_desc_getthreadid) */
+OKL4_FORCE_INLINE uint32_t
+_okl4_tracepoint_desc_getthreadid(const _okl4_tracepoint_desc_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 22;
+            uint32_t field : 6;
+        } bits;
+        _okl4_tracepoint_desc_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(_okl4_tracepoint_desc_setthreadid, 2n >= 0 && 2n <= 63) */
+/*lint -esym(714, _okl4_tracepoint_desc_setthreadid) */
+
+/*lint -esym(621, _okl4_tracepoint_desc_setthreadid) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setthreadid(_okl4_tracepoint_desc_t *x, uint32_t _threadid)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 22;
+            uint32_t field : 6;
+        } bits;
+        _okl4_tracepoint_desc_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_threadid;
+    *x = _conv.raw;
+}
+/*lint -sem(_okl4_tracepoint_desc_getr1, 1p, @n >= 0 && @n <= 15) */
+/*lint -esym(621, _okl4_tracepoint_desc_getr1) */
+/*lint -esym(714, _okl4_tracepoint_desc_getr1) */
+OKL4_FORCE_INLINE uint32_t
+_okl4_tracepoint_desc_getr1(const _okl4_tracepoint_desc_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 28;
+            uint32_t field : 4;
+        } bits;
+        _okl4_tracepoint_desc_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(_okl4_tracepoint_desc_setr1, 2n >= 0 && 2n <= 15) */
+/*lint -esym(714, _okl4_tracepoint_desc_setr1) */
+
+/*lint -esym(621, _okl4_tracepoint_desc_setr1) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setr1(_okl4_tracepoint_desc_t *x, uint32_t __r1)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 28;
+            uint32_t field : 4;
+        } bits;
+        _okl4_tracepoint_desc_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)__r1;
+    *x = _conv.raw;
+}
+/*lint -esym(714, _okl4_tracepoint_desc_init) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_init(_okl4_tracepoint_desc_t *x)
+{
+    *x = (_okl4_tracepoint_desc_t)0U;
+}
+
+/*lint -esym(714, _okl4_tracepoint_desc_cast) */
+OKL4_FORCE_INLINE _okl4_tracepoint_desc_t
+_okl4_tracepoint_desc_cast(uint32_t p, okl4_bool_t force)
+{
+    _okl4_tracepoint_desc_t x = (_okl4_tracepoint_desc_t)p;
+    (void)force;
+    return x;
+}
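+
+/*
+ * Illustrative usage sketch (editorial addition, not part of the generated
+ * OKL4 API): packing a tracepoint descriptor word with the accessors above.
+ * The helper name and field values are examples only; reclen and cpuid are
+ * limited to their 6-bit field widths.
+ */
+OKL4_FORCE_INLINE _okl4_tracepoint_desc_t
+okl4_example_tracepoint_desc(uint32_t id, okl4_count_t cpu, uint32_t words)
+{
+    _okl4_tracepoint_desc_t desc;
+
+    _okl4_tracepoint_desc_init(&desc);
+    _okl4_tracepoint_desc_setid(&desc, id);                  /* bits 7..0   */
+    _okl4_tracepoint_desc_setuser(&desc, (okl4_bool_t)1);    /* user record */
+    _okl4_tracepoint_desc_setreclen(&desc, words);           /* bits 15..10 */
+    _okl4_tracepoint_desc_setcpuid(&desc, cpu);              /* bits 21..16 */
+    return desc;
+}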
+
+
+
+/**
+    - BITS 15..0 -   @ref _OKL4_MASK_CLASS_TRACEPOINT_MASKS
+    - BITS 31..16 -   @ref _OKL4_MASK_SUBSYSTEM_TRACEPOINT_MASKS
+*/
+
+/*lint -esym(621, _okl4_tracepoint_masks_t) */
+typedef uint32_t _okl4_tracepoint_masks_t;
+
+/*lint -esym(621, _okl4_tracepoint_masks_getclass) */
+/*lint -esym(714, _okl4_tracepoint_masks_getclass) */
+OKL4_FORCE_INLINE uint32_t
+_okl4_tracepoint_masks_getclass(const _okl4_tracepoint_masks_t *x);
+
+/*lint -esym(621, _okl4_tracepoint_masks_setclass) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_masks_setclass(_okl4_tracepoint_masks_t *x, uint32_t _class);
+
+/*lint -esym(621, _okl4_tracepoint_masks_getsubsystem) */
+/*lint -esym(714, _okl4_tracepoint_masks_getsubsystem) */
+OKL4_FORCE_INLINE uint32_t
+_okl4_tracepoint_masks_getsubsystem(const _okl4_tracepoint_masks_t *x);
+
+/*lint -esym(621, _okl4_tracepoint_masks_setsubsystem) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_masks_setsubsystem(_okl4_tracepoint_masks_t *x, uint32_t _subsystem);
+
+/*lint -esym(714, _okl4_tracepoint_masks_init) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_masks_init(_okl4_tracepoint_masks_t *x);
+
+/*lint -esym(714, _okl4_tracepoint_masks_cast) */
+OKL4_FORCE_INLINE _okl4_tracepoint_masks_t
+_okl4_tracepoint_masks_cast(uint32_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, _OKL4_TRACEPOINT_MASKS_CLASS_MASK) */
+#define _OKL4_TRACEPOINT_MASKS_CLASS_MASK ((_okl4_tracepoint_masks_t)65535U) /* Deprecated */
+/*lint -esym(621, _OKL4_MASK_CLASS_TRACEPOINT_MASKS) */
+#define _OKL4_MASK_CLASS_TRACEPOINT_MASKS ((_okl4_tracepoint_masks_t)65535U)
+/*lint -esym(621, _OKL4_SHIFT_CLASS_TRACEPOINT_MASKS) */
+#define _OKL4_SHIFT_CLASS_TRACEPOINT_MASKS (0)
+/*lint -esym(621, _OKL4_WIDTH_CLASS_TRACEPOINT_MASKS) */
+#define _OKL4_WIDTH_CLASS_TRACEPOINT_MASKS (16)
+/*lint -esym(621, _OKL4_TRACEPOINT_MASKS_SUBSYSTEM_MASK) */
+#define _OKL4_TRACEPOINT_MASKS_SUBSYSTEM_MASK ((_okl4_tracepoint_masks_t)65535U << 16) /* Deprecated */
+/*lint -esym(621, _OKL4_MASK_SUBSYSTEM_TRACEPOINT_MASKS) */
+#define _OKL4_MASK_SUBSYSTEM_TRACEPOINT_MASKS ((_okl4_tracepoint_masks_t)65535U << 16)
+/*lint -esym(621, _OKL4_SHIFT_SUBSYSTEM_TRACEPOINT_MASKS) */
+#define _OKL4_SHIFT_SUBSYSTEM_TRACEPOINT_MASKS (16)
+/*lint -esym(621, _OKL4_WIDTH_SUBSYSTEM_TRACEPOINT_MASKS) */
+#define _OKL4_WIDTH_SUBSYSTEM_TRACEPOINT_MASKS (16)
+
+
+/*lint -sem(_okl4_tracepoint_masks_getclass, 1p, @n >= 0 && @n <= 65535) */
+/*lint -esym(621, _okl4_tracepoint_masks_getclass) */
+/*lint -esym(714, _okl4_tracepoint_masks_getclass) */
+OKL4_FORCE_INLINE uint32_t
+_okl4_tracepoint_masks_getclass(const _okl4_tracepoint_masks_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 16;
+        } bits;
+        _okl4_tracepoint_masks_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(_okl4_tracepoint_masks_setclass, 2n >= 0 && 2n <= 65535) */
+/*lint -esym(714, _okl4_tracepoint_masks_setclass) */
+
+/*lint -esym(621, _okl4_tracepoint_masks_setclass) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_masks_setclass(_okl4_tracepoint_masks_t *x, uint32_t _class)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 16;
+        } bits;
+        _okl4_tracepoint_masks_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_class;
+    *x = _conv.raw;
+}
+/*lint -sem(_okl4_tracepoint_masks_getsubsystem, 1p, @n >= 0 && @n <= 65535) */
+/*lint -esym(621, _okl4_tracepoint_masks_getsubsystem) */
+/*lint -esym(714, _okl4_tracepoint_masks_getsubsystem) */
+OKL4_FORCE_INLINE uint32_t
+_okl4_tracepoint_masks_getsubsystem(const _okl4_tracepoint_masks_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 16;
+            uint32_t field : 16;
+        } bits;
+        _okl4_tracepoint_masks_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(_okl4_tracepoint_masks_setsubsystem, 2n >= 0 && 2n <= 65535) */
+/*lint -esym(714, _okl4_tracepoint_masks_setsubsystem) */
+
+/*lint -esym(621, _okl4_tracepoint_masks_setsubsystem) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_masks_setsubsystem(_okl4_tracepoint_masks_t *x, uint32_t _subsystem)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 16;
+            uint32_t field : 16;
+        } bits;
+        _okl4_tracepoint_masks_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_subsystem;
+    *x = _conv.raw;
+}
+/*lint -esym(714, _okl4_tracepoint_masks_init) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_masks_init(_okl4_tracepoint_masks_t *x)
+{
+    *x = (_okl4_tracepoint_masks_t)0U;
+}
+
+/*lint -esym(714, _okl4_tracepoint_masks_cast) */
+OKL4_FORCE_INLINE _okl4_tracepoint_masks_t
+_okl4_tracepoint_masks_cast(uint32_t p, okl4_bool_t force)
+{
+    _okl4_tracepoint_masks_t x = (_okl4_tracepoint_masks_t)p;
+    (void)force;
+    return x;
+}
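+
+/*
+ * Illustrative usage sketch (editorial addition, not part of the generated
+ * OKL4 API): packing a class mask and a subsystem mask into a single
+ * _okl4_tracepoint_masks_t word using the accessors above. The helper name
+ * is hypothetical.
+ */
+OKL4_FORCE_INLINE _okl4_tracepoint_masks_t
+okl4_example_tracepoint_masks(uint32_t class_mask, uint32_t subsystem_mask)
+{
+    _okl4_tracepoint_masks_t masks;
+
+    _okl4_tracepoint_masks_init(&masks);
+    _okl4_tracepoint_masks_setclass(&masks, class_mask);           /* bits 15..0  */
+    _okl4_tracepoint_masks_setsubsystem(&masks, subsystem_mask);   /* bits 31..16 */
+    return masks;
+}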
+
+
+
+
+struct okl4_tracepoint_entry_base {
+    uint32_t time_offset;
+    _okl4_tracepoint_masks_t masks;
+    _okl4_tracepoint_desc_t description;
+};
+
+
+
+
+
+
+
+typedef uint32_t okl4_tracepoint_evt_t;
+
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SCH_SCHEDULER_FLAG_SET_RUNNABLE) */
+#define OKL4_TRACEPOINT_EVT_SCH_SCHEDULER_FLAG_SET_RUNNABLE ((okl4_tracepoint_evt_t)0x0U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SCH_SCHEDULER_FLAG_CLEAR_RUNNABLE) */
+#define OKL4_TRACEPOINT_EVT_SCH_SCHEDULER_FLAG_CLEAR_RUNNABLE ((okl4_tracepoint_evt_t)0x1U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SCH_CONTEXT_SWITCH) */
+#define OKL4_TRACEPOINT_EVT_SCH_CONTEXT_SWITCH ((okl4_tracepoint_evt_t)0x2U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_KDB_SET_OBJECT_NAME) */
+#define OKL4_TRACEPOINT_EVT_KDB_SET_OBJECT_NAME ((okl4_tracepoint_evt_t)0x3U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_AXON_PROCESS_RECV) */
+#define OKL4_TRACEPOINT_EVT_SWI_AXON_PROCESS_RECV ((okl4_tracepoint_evt_t)0x4U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_AXON_SET_HALTED) */
+#define OKL4_TRACEPOINT_EVT_SWI_AXON_SET_HALTED ((okl4_tracepoint_evt_t)0x5U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_AXON_SET_RECV_AREA) */
+#define OKL4_TRACEPOINT_EVT_SWI_AXON_SET_RECV_AREA ((okl4_tracepoint_evt_t)0x6U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_AXON_SET_RECV_QUEUE) */
+#define OKL4_TRACEPOINT_EVT_SWI_AXON_SET_RECV_QUEUE ((okl4_tracepoint_evt_t)0x7U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_AXON_SET_RECV_SEGMENT) */
+#define OKL4_TRACEPOINT_EVT_SWI_AXON_SET_RECV_SEGMENT ((okl4_tracepoint_evt_t)0x8U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_AXON_SET_SEND_AREA) */
+#define OKL4_TRACEPOINT_EVT_SWI_AXON_SET_SEND_AREA ((okl4_tracepoint_evt_t)0x9U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_AXON_SET_SEND_QUEUE) */
+#define OKL4_TRACEPOINT_EVT_SWI_AXON_SET_SEND_QUEUE ((okl4_tracepoint_evt_t)0xaU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_AXON_SET_SEND_SEGMENT) */
+#define OKL4_TRACEPOINT_EVT_SWI_AXON_SET_SEND_SEGMENT ((okl4_tracepoint_evt_t)0xbU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_AXON_TRIGGER_SEND) */
+#define OKL4_TRACEPOINT_EVT_SWI_AXON_TRIGGER_SEND ((okl4_tracepoint_evt_t)0xcU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_ACK) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_ACK ((okl4_tracepoint_evt_t)0xdU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_PRIVATE) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_PRIVATE ((okl4_tracepoint_evt_t)0xeU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_SHARED) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_SHARED ((okl4_tracepoint_evt_t)0xfU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_DETACH) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_DETACH ((okl4_tracepoint_evt_t)0x10U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_DIST_ENABLE) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_DIST_ENABLE ((okl4_tracepoint_evt_t)0x11U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_EOI) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_EOI ((okl4_tracepoint_evt_t)0x12U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_GET_HIGHEST_PRIORITY_PENDING) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_GET_HIGHEST_PRIORITY_PENDING ((okl4_tracepoint_evt_t)0x13U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_GET_PAYLOAD) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_GET_PAYLOAD ((okl4_tracepoint_evt_t)0x14U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_LIMITS) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_LIMITS ((okl4_tracepoint_evt_t)0x15U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_MASK) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_MASK ((okl4_tracepoint_evt_t)0x16U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_RAISE) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_RAISE ((okl4_tracepoint_evt_t)0x17U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_BINARY_POINT) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_BINARY_POINT ((okl4_tracepoint_evt_t)0x18U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONFIG) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONFIG ((okl4_tracepoint_evt_t)0x19U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONTROL) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONTROL ((okl4_tracepoint_evt_t)0x1aU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY ((okl4_tracepoint_evt_t)0x1bU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY_MASK) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY_MASK ((okl4_tracepoint_evt_t)0x1cU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_TARGETS) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_TARGETS ((okl4_tracepoint_evt_t)0x1dU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_UNMASK) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_UNMASK ((okl4_tracepoint_evt_t)0x1eU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_KDB_INTERACT) */
+#define OKL4_TRACEPOINT_EVT_SWI_KDB_INTERACT ((okl4_tracepoint_evt_t)0x1fU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_KDB_SET_OBJECT_NAME) */
+#define OKL4_TRACEPOINT_EVT_SWI_KDB_SET_OBJECT_NAME ((okl4_tracepoint_evt_t)0x20U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_KSP_PROCEDURE_CALL) */
+#define OKL4_TRACEPOINT_EVT_SWI_KSP_PROCEDURE_CALL ((okl4_tracepoint_evt_t)0x21U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_ATTACH_SEGMENT) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_ATTACH_SEGMENT ((okl4_tracepoint_evt_t)0x22U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_DETACH_SEGMENT) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_DETACH_SEGMENT ((okl4_tracepoint_evt_t)0x23U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE ((okl4_tracepoint_evt_t)0x24U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE_PN) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE_PN ((okl4_tracepoint_evt_t)0x25U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PAGE) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PAGE ((okl4_tracepoint_evt_t)0x26U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PN) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PN ((okl4_tracepoint_evt_t)0x27U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_MAP_PAGE) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_MAP_PAGE ((okl4_tracepoint_evt_t)0x28U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_MAP_PN) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_MAP_PN ((okl4_tracepoint_evt_t)0x29U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_UNMAP_PAGE) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_UNMAP_PAGE ((okl4_tracepoint_evt_t)0x2aU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_UNMAP_PN) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_UNMAP_PN ((okl4_tracepoint_evt_t)0x2bU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_ATTRS) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_ATTRS ((okl4_tracepoint_evt_t)0x2cU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_PERMS) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_PERMS ((okl4_tracepoint_evt_t)0x2dU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_ATTRS) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_ATTRS ((okl4_tracepoint_evt_t)0x2eU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_PERMS) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_PERMS ((okl4_tracepoint_evt_t)0x2fU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_PERFORMANCE_NULL_SYSCALL) */
+#define OKL4_TRACEPOINT_EVT_SWI_PERFORMANCE_NULL_SYSCALL ((okl4_tracepoint_evt_t)0x30U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_PIPE_CONTROL) */
+#define OKL4_TRACEPOINT_EVT_SWI_PIPE_CONTROL ((okl4_tracepoint_evt_t)0x31U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_PIPE_RECV) */
+#define OKL4_TRACEPOINT_EVT_SWI_PIPE_RECV ((okl4_tracepoint_evt_t)0x32U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_PIPE_SEND) */
+#define OKL4_TRACEPOINT_EVT_SWI_PIPE_SEND ((okl4_tracepoint_evt_t)0x33U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_PRIORITY_WAIVE) */
+#define OKL4_TRACEPOINT_EVT_SWI_PRIORITY_WAIVE ((okl4_tracepoint_evt_t)0x34U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTER) */
+#define OKL4_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTER ((okl4_tracepoint_evt_t)0x35U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTERS) */
+#define OKL4_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTERS ((okl4_tracepoint_evt_t)0x36U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_REMOTE_READ_MEMORY32) */
+#define OKL4_TRACEPOINT_EVT_SWI_REMOTE_READ_MEMORY32 ((okl4_tracepoint_evt_t)0x37U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTER) */
+#define OKL4_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTER ((okl4_tracepoint_evt_t)0x38U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTERS) */
+#define OKL4_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTERS ((okl4_tracepoint_evt_t)0x39U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_REMOTE_WRITE_MEMORY32) */
+#define OKL4_TRACEPOINT_EVT_SWI_REMOTE_WRITE_MEMORY32 ((okl4_tracepoint_evt_t)0x3aU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_STATUS_SUSPENDED) */
+#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_STATUS_SUSPENDED ((okl4_tracepoint_evt_t)0x3bU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_WATCH_SUSPENDED) */
+#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_WATCH_SUSPENDED ((okl4_tracepoint_evt_t)0x3cU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_DISABLE) */
+#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_DISABLE ((okl4_tracepoint_evt_t)0x3dU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_ENABLE) */
+#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_ENABLE ((okl4_tracepoint_evt_t)0x3eU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_GET_DATA) */
+#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_GET_DATA ((okl4_tracepoint_evt_t)0x3fU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_DISABLE) */
+#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_DISABLE ((okl4_tracepoint_evt_t)0x40U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_ENABLE) */
+#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_ENABLE ((okl4_tracepoint_evt_t)0x41U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_GET_DATA) */
+#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_GET_DATA ((okl4_tracepoint_evt_t)0x42U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_SCHEDULER_SUSPEND) */
+#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULER_SUSPEND ((okl4_tracepoint_evt_t)0x43U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_TIMER_CANCEL) */
+#define OKL4_TRACEPOINT_EVT_SWI_TIMER_CANCEL ((okl4_tracepoint_evt_t)0x44U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_TIMER_GET_RESOLUTION) */
+#define OKL4_TRACEPOINT_EVT_SWI_TIMER_GET_RESOLUTION ((okl4_tracepoint_evt_t)0x45U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_TIMER_GET_TIME) */
+#define OKL4_TRACEPOINT_EVT_SWI_TIMER_GET_TIME ((okl4_tracepoint_evt_t)0x46U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_TIMER_QUERY) */
+#define OKL4_TRACEPOINT_EVT_SWI_TIMER_QUERY ((okl4_tracepoint_evt_t)0x47U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_TIMER_START) */
+#define OKL4_TRACEPOINT_EVT_SWI_TIMER_START ((okl4_tracepoint_evt_t)0x48U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_TRACEBUFFER_SYNC) */
+#define OKL4_TRACEPOINT_EVT_SWI_TRACEBUFFER_SYNC ((okl4_tracepoint_evt_t)0x49U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_VCPU_RESET) */
+#define OKL4_TRACEPOINT_EVT_SWI_VCPU_RESET ((okl4_tracepoint_evt_t)0x4aU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_VCPU_START) */
+#define OKL4_TRACEPOINT_EVT_SWI_VCPU_START ((okl4_tracepoint_evt_t)0x4bU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_VCPU_STOP) */
+#define OKL4_TRACEPOINT_EVT_SWI_VCPU_STOP ((okl4_tracepoint_evt_t)0x4cU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_VCPU_SWITCH_MODE) */
+#define OKL4_TRACEPOINT_EVT_SWI_VCPU_SWITCH_MODE ((okl4_tracepoint_evt_t)0x4dU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_VCPU_SYNC_SEV) */
+#define OKL4_TRACEPOINT_EVT_SWI_VCPU_SYNC_SEV ((okl4_tracepoint_evt_t)0x4eU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_VCPU_SYNC_WFE) */
+#define OKL4_TRACEPOINT_EVT_SWI_VCPU_SYNC_WFE ((okl4_tracepoint_evt_t)0x4fU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_VINTERRUPT_CLEAR_AND_RAISE) */
+#define OKL4_TRACEPOINT_EVT_SWI_VINTERRUPT_CLEAR_AND_RAISE ((okl4_tracepoint_evt_t)0x50U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_VINTERRUPT_MODIFY) */
+#define OKL4_TRACEPOINT_EVT_SWI_VINTERRUPT_MODIFY ((okl4_tracepoint_evt_t)0x51U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_VINTERRUPT_RAISE) */
+#define OKL4_TRACEPOINT_EVT_SWI_VINTERRUPT_RAISE ((okl4_tracepoint_evt_t)0x52U)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_MAX) */
+#define OKL4_TRACEPOINT_EVT_MAX ((okl4_tracepoint_evt_t)0x52U)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_INVALID) */
+#define OKL4_TRACEPOINT_EVT_INVALID ((okl4_tracepoint_evt_t)0xffffffffU)
+
+/*lint -esym(714, okl4_tracepoint_evt_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_tracepoint_evt_is_element_of(okl4_tracepoint_evt_t var);
+
+
+/*lint -esym(714, okl4_tracepoint_evt_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_tracepoint_evt_is_element_of(okl4_tracepoint_evt_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((var == OKL4_TRACEPOINT_EVT_SCH_SCHEDULER_FLAG_SET_RUNNABLE) ||
+            (var == OKL4_TRACEPOINT_EVT_SCH_SCHEDULER_FLAG_CLEAR_RUNNABLE) ||
+            (var == OKL4_TRACEPOINT_EVT_SCH_CONTEXT_SWITCH) ||
+            (var == OKL4_TRACEPOINT_EVT_KDB_SET_OBJECT_NAME) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_AXON_PROCESS_RECV) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_AXON_SET_HALTED) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_AXON_SET_RECV_AREA) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_AXON_SET_RECV_QUEUE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_AXON_SET_RECV_SEGMENT) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_AXON_SET_SEND_AREA) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_AXON_SET_SEND_QUEUE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_AXON_SET_SEND_SEGMENT) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_AXON_TRIGGER_SEND) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_ACK) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_PRIVATE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_SHARED) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_DETACH) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_DIST_ENABLE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_EOI) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_GET_HIGHEST_PRIORITY_PENDING) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_GET_PAYLOAD) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_LIMITS) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_MASK) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_RAISE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_BINARY_POINT) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONFIG) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONTROL) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY_MASK) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_TARGETS) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_UNMASK) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_KDB_INTERACT) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_KDB_SET_OBJECT_NAME) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_KSP_PROCEDURE_CALL) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_MMU_ATTACH_SEGMENT) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_MMU_DETACH_SEGMENT) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE_PN) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PAGE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PN) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_MMU_MAP_PAGE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_MMU_MAP_PN) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_MMU_UNMAP_PAGE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_MMU_UNMAP_PN) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_ATTRS) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_PERMS) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_ATTRS) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_PERMS) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_PERFORMANCE_NULL_SYSCALL) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_PIPE_CONTROL) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_PIPE_RECV) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_PIPE_SEND) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_PRIORITY_WAIVE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTER) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTERS) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_REMOTE_READ_MEMORY32) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTER) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTERS) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_REMOTE_WRITE_MEMORY32) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_STATUS_SUSPENDED) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_WATCH_SUSPENDED) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_DISABLE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_ENABLE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_GET_DATA) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_DISABLE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_ENABLE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_GET_DATA) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_SCHEDULER_SUSPEND) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_TIMER_CANCEL) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_TIMER_GET_RESOLUTION) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_TIMER_GET_TIME) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_TIMER_QUERY) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_TIMER_START) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_TRACEBUFFER_SYNC) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_VCPU_RESET) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_VCPU_START) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_VCPU_STOP) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_VCPU_SWITCH_MODE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_VCPU_SYNC_SEV) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_VCPU_SYNC_WFE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_VINTERRUPT_CLEAR_AND_RAISE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_VINTERRUPT_MODIFY) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_VINTERRUPT_RAISE));
+}
+
+
+
+typedef uint32_t okl4_tracepoint_level_t;
+
+/*lint -esym(621, OKL4_TRACEPOINT_LEVEL_DEBUG) */
+#define OKL4_TRACEPOINT_LEVEL_DEBUG ((okl4_tracepoint_level_t)0x0U)
+/*lint -esym(621, OKL4_TRACEPOINT_LEVEL_INFO) */
+#define OKL4_TRACEPOINT_LEVEL_INFO ((okl4_tracepoint_level_t)0x1U)
+/*lint -esym(621, OKL4_TRACEPOINT_LEVEL_WARN) */
+#define OKL4_TRACEPOINT_LEVEL_WARN ((okl4_tracepoint_level_t)0x2U)
+/*lint -esym(621, OKL4_TRACEPOINT_LEVEL_CRITICAL) */
+#define OKL4_TRACEPOINT_LEVEL_CRITICAL ((okl4_tracepoint_level_t)0x3U)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_TRACEPOINT_LEVEL_MAX) */
+#define OKL4_TRACEPOINT_LEVEL_MAX ((okl4_tracepoint_level_t)0x3U)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_TRACEPOINT_LEVEL_INVALID) */
+#define OKL4_TRACEPOINT_LEVEL_INVALID ((okl4_tracepoint_level_t)0xffffffffU)
+
+/*lint -esym(714, okl4_tracepoint_level_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_tracepoint_level_is_element_of(okl4_tracepoint_level_t var);
+
+
+/*lint -esym(714, okl4_tracepoint_level_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_tracepoint_level_is_element_of(okl4_tracepoint_level_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((var == OKL4_TRACEPOINT_LEVEL_DEBUG) ||
+            (var == OKL4_TRACEPOINT_LEVEL_INFO) ||
+            (var == OKL4_TRACEPOINT_LEVEL_WARN) ||
+            (var == OKL4_TRACEPOINT_LEVEL_CRITICAL));
+}
+
+
+
+typedef uint32_t okl4_tracepoint_mask_t;
+
+
+
+
+
+typedef uint32_t okl4_tracepoint_subsystem_t;
+
+/*lint -esym(621, OKL4_TRACEPOINT_SUBSYSTEM_SCHEDULER) */
+#define OKL4_TRACEPOINT_SUBSYSTEM_SCHEDULER ((okl4_tracepoint_subsystem_t)0x0U)
+/*lint -esym(621, OKL4_TRACEPOINT_SUBSYSTEM_TRACE) */
+#define OKL4_TRACEPOINT_SUBSYSTEM_TRACE ((okl4_tracepoint_subsystem_t)0x1U)
+/*lint -esym(621, OKL4_TRACEPOINT_SUBSYSTEM_CORE) */
+#define OKL4_TRACEPOINT_SUBSYSTEM_CORE ((okl4_tracepoint_subsystem_t)0x2U)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_TRACEPOINT_SUBSYSTEM_MAX) */
+#define OKL4_TRACEPOINT_SUBSYSTEM_MAX ((okl4_tracepoint_subsystem_t)0x2U)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_TRACEPOINT_SUBSYSTEM_INVALID) */
+#define OKL4_TRACEPOINT_SUBSYSTEM_INVALID ((okl4_tracepoint_subsystem_t)0xffffffffU)
+
+/*lint -esym(714, okl4_tracepoint_subsystem_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_tracepoint_subsystem_is_element_of(okl4_tracepoint_subsystem_t var);
+
+
+/*lint -esym(714, okl4_tracepoint_subsystem_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_tracepoint_subsystem_is_element_of(okl4_tracepoint_subsystem_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((var == OKL4_TRACEPOINT_SUBSYSTEM_SCHEDULER) ||
+            (var == OKL4_TRACEPOINT_SUBSYSTEM_TRACE) ||
+            (var == OKL4_TRACEPOINT_SUBSYSTEM_CORE));
+}
+
+
+
+struct okl4_tracepoint_unpacked_entry {
+    struct okl4_tracepoint_entry_base entry;
+    uint32_t data[]; /*lint --e{9038} flex array */
+};
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+/**
+    The okl4_vclient_info structure describes a virtual-services client:
+    the Axon endpoint data used for its channel and an opaque pointer
+    reserved for the client's own use.
+*/
+
+struct okl4_vclient_info {
+    struct okl4_axon_ep_data axon_ep;
+    __ptr64(void *, opaque);
+};
+
+
+
+
+/**
+    The okl4_vcpu_entry structure describes a single virtual CPU: its vCPU
+    capability, the capability used to raise IPIs to it, its interrupt
+    number and its initial stack pointer.
+*/
+
+struct okl4_vcpu_entry {
+    okl4_kcap_t vcpu;
+    okl4_kcap_t ipi;
+    okl4_interrupt_number_t irq;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+    okl4_register_t stack_pointer;
+};
+
+
+
+
+
+typedef okl4_arm_mpidr_t okl4_vcpu_id_t;
+
+
+
+
+/**
+    The okl4_vcpu_table structure describes a set of virtual CPUs: the
+    number of vCPUs and a pointer to an array of okl4_vcpu_entry
+    structures.
+*/
+
+struct okl4_vcpu_table {
+    okl4_count_t num_vcpus;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+    __ptr64(struct okl4_vcpu_entry *, vcpu);
+};
+
+
+
+
+/**
+    The okl4_vfp_ctrl_registers object represents the set of control
+    registers in the ARM VFP unit.
+*/
+
+struct okl4_vfp_ctrl_registers {
+    uint32_t fpsr;
+    uint32_t fpcr;
+};
+
+
+
+
+
+
+/**
+    The okl4_vfp_ctrl_registers_t type represents the set of VFP control
+    registers on the native machine.
+*/
+
+typedef struct okl4_vfp_ctrl_registers okl4_vfp_ctrl_registers_t;
+
+
+
+
+/**
+    The okl4_vfp_ops_t object represents the set of operations that may be
+    performed on the ARM VFP unit.
+
+    - @ref OKL4_VFP_OPS_MAX
+    - @ref OKL4_VFP_OPS_INVALID
+*/
+
+typedef uint32_t okl4_vfp_ops_t;
+
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_VFP_OPS_MAX) */
+#define OKL4_VFP_OPS_MAX ((okl4_vfp_ops_t)0x0U)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_VFP_OPS_INVALID) */
+#define OKL4_VFP_OPS_INVALID ((okl4_vfp_ops_t)0xffffffffU)
+
+/*lint -esym(714, okl4_vfp_ops_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_vfp_ops_is_element_of(okl4_vfp_ops_t var);
+
+
+/*lint -esym(714, okl4_vfp_ops_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_vfp_ops_is_element_of(okl4_vfp_ops_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((okl4_bool_t)0);
+}
+
+
+
+struct okl4_vfp_register {
+    __attribute__((aligned(16))) uint8_t __bytes[16];
+};
+
+
+
+
+
+
+
+typedef struct okl4_vfp_register okl4_vfp_register_t;
+
+
+
+
+/**
+    The okl4_vfp_registers object represents the set of registers in the
+    ARM VFP unit, including the control registers.
+*/
+
+struct okl4_vfp_registers {
+    okl4_vfp_register_t v0;
+    okl4_vfp_register_t v1;
+    okl4_vfp_register_t v2;
+    okl4_vfp_register_t v3;
+    okl4_vfp_register_t v4;
+    okl4_vfp_register_t v5;
+    okl4_vfp_register_t v6;
+    okl4_vfp_register_t v7;
+    okl4_vfp_register_t v8;
+    okl4_vfp_register_t v9;
+    okl4_vfp_register_t v10;
+    okl4_vfp_register_t v11;
+    okl4_vfp_register_t v12;
+    okl4_vfp_register_t v13;
+    okl4_vfp_register_t v14;
+    okl4_vfp_register_t v15;
+    okl4_vfp_register_t v16;
+    okl4_vfp_register_t v17;
+    okl4_vfp_register_t v18;
+    okl4_vfp_register_t v19;
+    okl4_vfp_register_t v20;
+    okl4_vfp_register_t v21;
+    okl4_vfp_register_t v22;
+    okl4_vfp_register_t v23;
+    okl4_vfp_register_t v24;
+    okl4_vfp_register_t v25;
+    okl4_vfp_register_t v26;
+    okl4_vfp_register_t v27;
+    okl4_vfp_register_t v28;
+    okl4_vfp_register_t v29;
+    okl4_vfp_register_t v30;
+    okl4_vfp_register_t v31;
+    struct okl4_vfp_ctrl_registers control;
+    _okl4_padding_t __padding0_8; /**< Padding 16 */
+    _okl4_padding_t __padding1_9; /**< Padding 16 */
+    _okl4_padding_t __padding2_10; /**< Padding 16 */
+    _okl4_padding_t __padding3_11; /**< Padding 16 */
+    _okl4_padding_t __padding4_12; /**< Padding 16 */
+    _okl4_padding_t __padding5_13; /**< Padding 16 */
+    _okl4_padding_t __padding6_14; /**< Padding 16 */
+    _okl4_padding_t __padding7_15; /**< Padding 16 */
+};
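+
+/*
+ * Layout note: 32 16-byte V registers (512 bytes) plus the 8-byte control
+ * block and 8 bytes of trailing padding give the 528-byte, 16-byte-aligned
+ * object asserted further below.
+ */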
+
+
+
+
+
+
+/**
+    The okl4_vfp_registers_t type represents a set of VFP registers on
+    the native machine.
+*/
+
+typedef struct okl4_vfp_registers okl4_vfp_registers_t;
+
+
+
+
+/**
+    The okl4_virtmem_pool object describes a pool of virtual memory as a
+    single okl4_virtmem_item.
+*/
+
+struct okl4_virtmem_pool {
+    struct okl4_virtmem_item pool;
+};
+
+
+
+
+/**
+    The okl4_virtual_interrupt_lines object holds the number of virtual
+    interrupt lines and a pointer to an array of their capabilities.
+*/
+
+struct okl4_virtual_interrupt_lines {
+    okl4_count_t num_lines;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+    __ptr64(okl4_kcap_t *, lines);
+};
+
+
+
+
+/**
+    The okl4_vserver_info object describes a virtual server: its channel
+    configuration (Axon endpoint data, maximum number of messages and
+    message size) and its number of clients.
+*/
+
+struct okl4_vserver_info {
+    struct {
+        __ptr64(struct okl4_axon_ep_data *, data);
+        okl4_count_t max_messages;
+        _okl4_padding_t __padding0_4; /**< Padding 8 */
+        _okl4_padding_t __padding1_5; /**< Padding 8 */
+        _okl4_padding_t __padding2_6; /**< Padding 8 */
+        _okl4_padding_t __padding3_7; /**< Padding 8 */
+        okl4_ksize_t message_size;
+    } channels;
+
+    okl4_count_t num_clients;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+};
+
+
+
+
+/**
+    The okl4_vservices_service_descriptor object names a service and the
+    protocol it speaks; the third field is reserved.
+*/
+
+struct okl4_vservices_service_descriptor {
+    __ptr64(okl4_string_t, name);
+    __ptr64(okl4_string_t, protocol);
+    __ptr64(void *, RESERVED);
+};
+
+
+
+
+
+typedef uint32_t okl4_vservices_transport_type_t;
+
+/*lint -esym(621, OKL4_VSERVICES_TRANSPORT_TYPE_AXON) */
+#define OKL4_VSERVICES_TRANSPORT_TYPE_AXON ((okl4_vservices_transport_type_t)0x0U)
+/*lint -esym(621, OKL4_VSERVICES_TRANSPORT_TYPE_SHARED_BUFFER) */
+#define OKL4_VSERVICES_TRANSPORT_TYPE_SHARED_BUFFER ((okl4_vservices_transport_type_t)0x1U)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_VSERVICES_TRANSPORT_TYPE_MAX) */
+#define OKL4_VSERVICES_TRANSPORT_TYPE_MAX ((okl4_vservices_transport_type_t)0x1U)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_VSERVICES_TRANSPORT_TYPE_INVALID) */
+#define OKL4_VSERVICES_TRANSPORT_TYPE_INVALID ((okl4_vservices_transport_type_t)0xffffffffU)
+
+/*lint -esym(714, okl4_vservices_transport_type_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_vservices_transport_type_is_element_of(okl4_vservices_transport_type_t var);
+
+
+/*lint -esym(714, okl4_vservices_transport_type_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_vservices_transport_type_is_element_of(okl4_vservices_transport_type_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((var == OKL4_VSERVICES_TRANSPORT_TYPE_AXON) ||
+            (var == OKL4_VSERVICES_TRANSPORT_TYPE_SHARED_BUFFER));
+}
+
+
+/**
+    The okl4_vservices_transport_microvisor object describes a single
+    transport: its type, the Axon or shared-buffer configuration selected
+    by that type, the incoming and outgoing virtual interrupt lines, and
+    the services carried over the transport.
+*/
+
+struct okl4_vservices_transport_microvisor {
+    okl4_bool_t is_server;
+    _okl4_padding_t __padding0_1;
+    _okl4_padding_t __padding1_2;
+    _okl4_padding_t __padding2_3;
+    okl4_vservices_transport_type_t type;
+    union {
+        struct {
+            struct okl4_axon_ep_data ep;
+            okl4_ksize_t message_size;
+            okl4_count_t queue_length;
+            _okl4_padding_t __padding0_4; /**< Padding 8 */
+            _okl4_padding_t __padding1_5; /**< Padding 8 */
+            _okl4_padding_t __padding2_6; /**< Padding 8 */
+            _okl4_padding_t __padding3_7; /**< Padding 8 */
+        } axon;
+
+        struct {
+            okl4_ksize_t message_size;
+            okl4_count_t queue_length;
+            _okl4_padding_t __padding0_4; /**< Padding 8 */
+            _okl4_padding_t __padding1_5; /**< Padding 8 */
+            _okl4_padding_t __padding2_6; /**< Padding 8 */
+            _okl4_padding_t __padding3_7; /**< Padding 8 */
+            struct okl4_virtmem_item rx;
+            okl4_count_t rx_batch_size;
+            okl4_count_t rx_notify_bits;
+            struct okl4_virtmem_item tx;
+            okl4_count_t tx_batch_size;
+            okl4_count_t tx_notify_bits;
+        } shared_buffer;
+
+    } u;
+
+    struct okl4_virtual_interrupt_lines virqs_in;
+    struct okl4_virtual_interrupt_lines virqs_out;
+    okl4_count_t num_services;
+    _okl4_padding_t __padding3_4;
+    _okl4_padding_t __padding4_5;
+    _okl4_padding_t __padding5_6;
+    _okl4_padding_t __padding6_7;
+    __ptr64(struct okl4_vservices_service_descriptor *, services);
+};
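+
+/*
+ * Illustrative note: `type` discriminates the union `u` above, so a reader
+ * of this structure selects the matching member (`t` and `use()` are
+ * hypothetical):
+ *
+ *     if (t->type == OKL4_VSERVICES_TRANSPORT_TYPE_AXON) {
+ *         use(&t->u.axon);
+ *     } else if (t->type == OKL4_VSERVICES_TRANSPORT_TYPE_SHARED_BUFFER) {
+ *         use(&t->u.shared_buffer);
+ *     }
+ */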
+
+
+
+
+/**
+    The okl4_vservices_transports object holds the number of transports
+    and a pointer to an array describing them.
+*/
+
+struct okl4_vservices_transports {
+    okl4_count_t num_transports;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+    __ptr64(struct okl4_vservices_transport_microvisor *, transports);
+};
+
+
+
+
+
+typedef struct okl4_axon_data okl4_axon_data_t;
+typedef struct okl4_axon_ep_data okl4_axon_ep_data_t;
+typedef struct okl4_range_item okl4_range_item_t;
+typedef struct okl4_virtmem_item okl4_virtmem_item_t;
+typedef struct okl4_cell_management_item okl4_cell_management_item_t;
+typedef struct okl4_cell_management okl4_cell_management_t;
+typedef struct okl4_segment_mapping okl4_segment_mapping_t;
+typedef struct okl4_cell_management_segments okl4_cell_management_segments_t;
+typedef struct okl4_cell_management_vcpus okl4_cell_management_vcpus_t;
+typedef struct _okl4_env okl4_env_t;
+typedef struct okl4_env_access_cell okl4_env_access_cell_t;
+typedef struct okl4_env_access_entry okl4_env_access_entry_t;
+typedef struct okl4_env_access_table okl4_env_access_table_t;
+typedef struct okl4_env_args okl4_env_args_t;
+typedef struct okl4_env_interrupt_device_map okl4_env_interrupt_device_map_t;
+typedef struct okl4_interrupt okl4_interrupt_t;
+typedef struct okl4_env_interrupt_handle okl4_env_interrupt_handle_t;
+typedef struct okl4_env_interrupt_list okl4_env_interrupt_list_t;
+typedef struct okl4_env_profile_cell okl4_env_profile_cell_t;
+typedef struct okl4_env_profile_cpu okl4_env_profile_cpu_t;
+typedef struct okl4_env_profile_table okl4_env_profile_table_t;
+typedef struct okl4_env_segment okl4_env_segment_t;
+typedef struct okl4_env_segment_table okl4_env_segment_table_t;
+typedef struct okl4_firmware_segment okl4_firmware_segment_t;
+typedef struct okl4_firmware_segments_info okl4_firmware_segments_info_t;
+typedef void (*okl4_irq_callback_t)(okl4_interrupt_number_t irq, void *opaque);
+typedef struct okl4_kmmu okl4_kmmu_t;
+typedef struct okl4_ksp_user_agent okl4_ksp_user_agent_t;
+typedef struct okl4_pipe_data okl4_pipe_data_t;
+typedef struct okl4_pipe_ep_data okl4_pipe_ep_data_t;
+typedef struct okl4_link okl4_link_t;
+typedef struct okl4_links okl4_links_t;
+typedef struct okl4_machine_info okl4_machine_info_t;
+typedef struct okl4_merged_physpool okl4_merged_physpool_t;
+typedef struct okl4_microvisor_timer okl4_microvisor_timer_t;
+typedef struct okl4_schedule_profile_data okl4_schedule_profile_data_t;
+typedef struct okl4_shared_buffer okl4_shared_buffer_t;
+typedef struct okl4_shared_buffers_array okl4_shared_buffers_array_t;
+typedef struct okl4_tracebuffer_env okl4_tracebuffer_env_t;
+typedef struct okl4_vclient_info okl4_vclient_info_t;
+typedef struct okl4_vcpu_entry okl4_vcpu_entry_t;
+typedef struct okl4_vcpu_table okl4_vcpu_table_t;
+typedef struct okl4_virtmem_pool okl4_virtmem_pool_t;
+typedef struct okl4_virtual_interrupt_lines okl4_virtual_interrupt_lines_t;
+typedef struct okl4_vserver_info okl4_vserver_info_t;
+typedef struct okl4_vservices_service_descriptor okl4_vservices_service_descriptor_t;
+typedef struct okl4_vservices_transport_microvisor okl4_vservices_transport_microvisor_t;
+typedef struct okl4_vservices_transports okl4_vservices_transports_t;
+
+/*
+ * Return structures from system calls.
+ */
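+/*
+ * Usage sketch (illustrative only; the wrapper name, its parameters and
+ * the success constant are assumed from the naming convention, not taken
+ * from this header):
+ *
+ *     struct _okl4_sys_axon_set_halted_return ret =
+ *             _okl4_sys_axon_set_halted(axon_cap, halted);
+ *     if (ret.error != OKL4_ERROR_OK) {
+ *         ... handle the error ...
+ *     }
+ *
+ * Each system call returns one of the structures below; most carry only
+ * an okl4_error_t.
+ */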
+/*lint -save -e958 -e959 implicit padding */
+struct _okl4_sys_axon_process_recv_return {
+    okl4_error_t error;
+    okl4_bool_t send_empty;
+};
+
+struct _okl4_sys_axon_set_halted_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_axon_set_recv_area_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_axon_set_recv_queue_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_axon_set_recv_segment_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_axon_set_send_area_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_axon_set_send_queue_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_axon_set_send_segment_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_axon_trigger_send_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_ack_return {
+    okl4_interrupt_number_t irq;
+    uint8_t source;
+};
+
+struct _okl4_sys_interrupt_attach_private_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_attach_shared_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_detach_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_dist_enable_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_eoi_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_get_highest_priority_pending_return {
+    okl4_interrupt_number_t irq;
+    uint8_t source;
+};
+
+struct _okl4_sys_interrupt_get_payload_return {
+    okl4_error_t error;
+    okl4_virq_flags_t payload;
+};
+
+struct _okl4_sys_interrupt_limits_return {
+    okl4_count_t cpunumber;
+    okl4_count_t itnumber;
+};
+
+struct _okl4_sys_interrupt_mask_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_raise_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_set_binary_point_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_set_config_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_set_control_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_set_priority_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_set_priority_mask_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_set_targets_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_unmask_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_kdb_set_object_name_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_ksp_procedure_call_return {
+    okl4_error_t error;
+    okl4_ksp_arg_t ret0;
+    okl4_ksp_arg_t ret1;
+    okl4_ksp_arg_t ret2;
+};
+
+struct _okl4_sys_mmu_attach_segment_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_mmu_detach_segment_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_mmu_flush_range_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_mmu_flush_range_pn_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_mmu_lookup_page_return {
+    okl4_error_t error;
+    okl4_psize_tr_t offset;
+    okl4_mmu_lookup_size_t size;
+    _okl4_page_attribute_t page_attr;
+};
+
+struct _okl4_sys_mmu_lookup_pn_return {
+    okl4_mmu_lookup_index_t segment_index;
+    okl4_psize_pn_t offset_pn;
+    okl4_lsize_pn_t count_pn;
+    _okl4_page_attribute_t page_attr;
+};
+
+struct _okl4_sys_mmu_map_page_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_mmu_map_pn_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_mmu_unmap_page_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_mmu_unmap_pn_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_mmu_update_page_attrs_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_mmu_update_page_perms_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_mmu_update_pn_attrs_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_mmu_update_pn_perms_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_performance_null_syscall_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_pipe_control_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_pipe_recv_return {
+    okl4_error_t error;
+    okl4_ksize_t size;
+};
+
+struct _okl4_sys_pipe_send_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_priority_waive_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_remote_get_register_return {
+    uint32_t reg_w0;
+    uint32_t reg_w1;
+    uint32_t reg_w2;
+    uint32_t reg_w3;
+    okl4_error_t error;
+};
+
+struct _okl4_sys_remote_get_registers_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_remote_read_memory32_return {
+    uint32_t data;
+    okl4_error_t error;
+};
+
+struct _okl4_sys_remote_set_register_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_remote_set_registers_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_remote_write_memory32_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_schedule_metrics_status_suspended_return {
+    okl4_error_t error;
+    uint32_t power_suspend_version;
+    uint32_t power_suspend_running_count;
+};
+
+struct _okl4_sys_schedule_metrics_watch_suspended_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_schedule_profile_cpu_disable_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_schedule_profile_cpu_enable_return {
+    okl4_error_t error;
+    uint64_t timestamp;
+};
+
+struct _okl4_sys_schedule_profile_cpu_get_data_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_schedule_profile_vcpu_disable_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_schedule_profile_vcpu_enable_return {
+    okl4_error_t error;
+    uint64_t timestamp;
+};
+
+struct _okl4_sys_schedule_profile_vcpu_get_data_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_scheduler_suspend_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_timer_cancel_return {
+    uint64_t remaining;
+    okl4_timer_flags_t old_flags;
+    okl4_error_t error;
+};
+
+struct _okl4_sys_timer_get_resolution_return {
+    uint64_t tick_freq;
+    uint32_t a;
+    uint32_t b;
+    okl4_error_t error;
+};
+
+struct _okl4_sys_timer_get_time_return {
+    uint64_t time;
+    okl4_error_t error;
+};
+
+struct _okl4_sys_timer_query_return {
+    uint64_t remaining;
+    okl4_timer_flags_t active_flags;
+    okl4_error_t error;
+};
+
+struct _okl4_sys_timer_start_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_vcpu_reset_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_vcpu_start_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_vcpu_stop_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_vcpu_switch_mode_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_vinterrupt_clear_and_raise_return {
+    okl4_error_t error;
+    okl4_virq_flags_t payload;
+};
+
+struct _okl4_sys_vinterrupt_modify_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_vinterrupt_raise_return {
+    okl4_error_t error;
+};
+
+/*lint -restore */
+
+/*
+ * Ensure type sizes have been correctly calculated by the
+ * code generator.  We test to see if the C compiler agrees
+ * with us about the size of the type.
+ */
+
+#if !defined(GLOBAL_STATIC_ASSERT)
+#if defined(__cplusplus)
+/* FIX: we should be able to use static_assert, but it doesn't compile */
+#define GLOBAL_STATIC_ASSERT(expr, msg)
+#else
+#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
+#define GLOBAL_STATIC_ASSERT(expr, msg) \
+        _Static_assert(expr, #msg);
+#else
+#define GLOBAL_STATIC_ASSERT(expr, msg)
+#endif
+#endif
+#endif
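+
+/*
+ * Note: on the C++ path above, and with GCC older than 4.6,
+ * GLOBAL_STATIC_ASSERT expands to nothing, so the size and alignment
+ * checks below are only enforced where _Static_assert is available.
+ */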
+
+
+GLOBAL_STATIC_ASSERT(sizeof(okl4_arm_mpidr_t) == 8U,
+        __autogen_confused_about_sizeof_arm_mpidr)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_arm_mpidr_t) == 8U,
+        __autogen_confused_about_alignof_arm_mpidr)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_arm_psci_function_t) == 4U,
+        __autogen_confused_about_sizeof_arm_psci_function)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_arm_psci_function_t) == 4U,
+        __autogen_confused_about_alignof_arm_psci_function)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_arm_psci_result_t) == 4U,
+        __autogen_confused_about_sizeof_arm_psci_result)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_arm_psci_result_t) == 4U,
+        __autogen_confused_about_alignof_arm_psci_result)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_arm_psci_suspend_state_t) == 4U,
+        __autogen_confused_about_sizeof_arm_psci_suspend_state)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_arm_psci_suspend_state_t) == 4U,
+        __autogen_confused_about_alignof_arm_psci_suspend_state)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_arm_sctlr_t) == 4U,
+        __autogen_confused_about_sizeof_arm_sctlr)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_arm_sctlr_t) == 4U,
+        __autogen_confused_about_alignof_arm_sctlr)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_arm_smccc_arch_function_t) == 4U,
+        __autogen_confused_about_sizeof_arm_smccc_arch_function)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_arm_smccc_arch_function_t) == 4U,
+        __autogen_confused_about_alignof_arm_smccc_arch_function)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_arm_smccc_result_t) == 4U,
+        __autogen_confused_about_sizeof_arm_smccc_result)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_arm_smccc_result_t) == 4U,
+        __autogen_confused_about_alignof_arm_smccc_result)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_atomic_register) == 8U,
+        __autogen_confused_about_sizeof_atomic_register)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_atomic_register) == 8U,
+        __autogen_confused_about_alignof_atomic_register)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_atomic_register_t) == 8U,
+        __autogen_confused_about_sizeof_atomic_register_t)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_atomic_register_t) == 8U,
+        __autogen_confused_about_alignof_atomic_register_t)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_atomic_uint16) == 2U,
+        __autogen_confused_about_sizeof_atomic_uint16)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_atomic_uint16) == 2U,
+        __autogen_confused_about_alignof_atomic_uint16)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_atomic_uint16_t) == 2U,
+        __autogen_confused_about_sizeof_atomic_uint16_t)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_atomic_uint16_t) == 2U,
+        __autogen_confused_about_alignof_atomic_uint16_t)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_atomic_uint32) == 4U,
+        __autogen_confused_about_sizeof_atomic_uint32)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_atomic_uint32) == 4U,
+        __autogen_confused_about_alignof_atomic_uint32)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_atomic_uint32_t) == 4U,
+        __autogen_confused_about_sizeof_atomic_uint32_t)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_atomic_uint32_t) == 4U,
+        __autogen_confused_about_alignof_atomic_uint32_t)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_atomic_uint64) == 8U,
+        __autogen_confused_about_sizeof_atomic_uint64)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_atomic_uint64) == 8U,
+        __autogen_confused_about_alignof_atomic_uint64)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_atomic_uint64_t) == 8U,
+        __autogen_confused_about_sizeof_atomic_uint64_t)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_atomic_uint64_t) == 8U,
+        __autogen_confused_about_alignof_atomic_uint64_t)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_atomic_uint8) == 1U,
+        __autogen_confused_about_sizeof_atomic_uint8)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_atomic_uint8) == 1U,
+        __autogen_confused_about_alignof_atomic_uint8)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_atomic_uint8_t) == 1U,
+        __autogen_confused_about_sizeof_atomic_uint8_t)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_atomic_uint8_t) == 1U,
+        __autogen_confused_about_alignof_atomic_uint8_t)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_axon_data) == 12U,
+        __autogen_confused_about_sizeof_axon_data)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_axon_data) == 4U,
+        __autogen_confused_about_alignof_axon_data)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_axon_data_info_t) == 8U,
+        __autogen_confused_about_sizeof_axon_data_info)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_axon_data_info_t) == 8U,
+        __autogen_confused_about_alignof_axon_data_info)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_axon_ep_data) == 24U,
+        __autogen_confused_about_sizeof_axon_ep_data)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_axon_ep_data) == 4U,
+        __autogen_confused_about_alignof_axon_ep_data)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_axon_queue) == 12U,
+        __autogen_confused_about_sizeof_axon_queue)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_axon_queue) == 4U,
+        __autogen_confused_about_alignof_axon_queue)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_axon_queue_entry) == 24U,
+        __autogen_confused_about_sizeof_axon_queue_entry)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_axon_queue_entry) == 8U,
+        __autogen_confused_about_alignof_axon_queue_entry)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_axon_queue_size_t) == 2U,
+        __autogen_confused_about_sizeof_axon_queue_size)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_axon_queue_size_t) == 2U,
+        __autogen_confused_about_alignof_axon_queue_size)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_axon_rx) == 56U,
+        __autogen_confused_about_sizeof_axon_rx)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_axon_rx) == 4U,
+        __autogen_confused_about_alignof_axon_rx)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_axon_tx) == 48U,
+        __autogen_confused_about_sizeof_axon_tx)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_axon_tx) == 4U,
+        __autogen_confused_about_alignof_axon_tx)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_axon_virq_flags_t) == 8U,
+        __autogen_confused_about_sizeof_axon_virq_flags)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_axon_virq_flags_t) == 8U,
+        __autogen_confused_about_alignof_axon_virq_flags)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_page_cache_t) == 4U,
+        __autogen_confused_about_sizeof_cache_attr)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_page_cache_t) == 4U,
+        __autogen_confused_about_alignof_cache_attr)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_range_item) == 16U,
+        __autogen_confused_about_sizeof_range_item)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_range_item) == 8U,
+        __autogen_confused_about_alignof_range_item)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_virtmem_item) == 16U,
+        __autogen_confused_about_sizeof_virtmem_item)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_virtmem_item) == 8U,
+        __autogen_confused_about_alignof_virtmem_item)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_cell_management_item) == 104U,
+        __autogen_confused_about_sizeof_cell_management_item)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_cell_management_item) == 8U,
+        __autogen_confused_about_alignof_cell_management_item)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_cell_management) == 8U,
+        __autogen_confused_about_sizeof_cell_management)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_cell_management) == 8U,
+        __autogen_confused_about_alignof_cell_management)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_segment_mapping) == 32U,
+        __autogen_confused_about_sizeof_segment_mapping)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_segment_mapping) == 8U,
+        __autogen_confused_about_alignof_segment_mapping)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_cell_management_segments) == 8U,
+        __autogen_confused_about_sizeof_cell_management_segments)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_cell_management_segments) == 8U,
+        __autogen_confused_about_alignof_cell_management_segments)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_cell_management_vcpus) == 4U,
+        __autogen_confused_about_sizeof_cell_management_vcpus)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_cell_management_vcpus) == 4U,
+        __autogen_confused_about_alignof_cell_management_vcpus)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_cpu_mode_t) == 4U,
+        __autogen_confused_about_sizeof_cpu_mode)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_cpu_mode_t) == 4U,
+        __autogen_confused_about_alignof_cpu_mode)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct _okl4_env_hdr) == 4U,
+        __autogen_confused_about_sizeof_env_hdr)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct _okl4_env_hdr) == 2U,
+        __autogen_confused_about_alignof_env_hdr)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct _okl4_env_item) == 16U,
+        __autogen_confused_about_sizeof_env_item)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct _okl4_env_item) == 8U,
+        __autogen_confused_about_alignof_env_item)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct _okl4_env) == 8U,
+        __autogen_confused_about_sizeof_env)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct _okl4_env) == 8U,
+        __autogen_confused_about_alignof_env)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_env_access_cell) == 16U,
+        __autogen_confused_about_sizeof_env_access_cell)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_env_access_cell) == 8U,
+        __autogen_confused_about_alignof_env_access_cell)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_page_perms_t) == 4U,
+        __autogen_confused_about_sizeof_page_perms)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_page_perms_t) == 4U,
+        __autogen_confused_about_alignof_page_perms)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_env_access_entry) == 48U,
+        __autogen_confused_about_sizeof_env_access_entry)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_env_access_entry) == 8U,
+        __autogen_confused_about_alignof_env_access_entry)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_env_access_table) == 24U,
+        __autogen_confused_about_sizeof_env_access_table)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_env_access_table) == 8U,
+        __autogen_confused_about_alignof_env_access_table)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_env_args) == 8U,
+        __autogen_confused_about_sizeof_env_args)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_env_args) == 8U,
+        __autogen_confused_about_alignof_env_args)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_env_interrupt_device_map) == 4U,
+        __autogen_confused_about_sizeof_env_interrupt_device_map)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_env_interrupt_device_map) == 4U,
+        __autogen_confused_about_alignof_env_interrupt_device_map)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_interrupt) == 4U,
+        __autogen_confused_about_sizeof_okl4_interrupt)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_interrupt) == 4U,
+        __autogen_confused_about_alignof_okl4_interrupt)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_env_interrupt_handle) == 8U,
+        __autogen_confused_about_sizeof_env_interrupt_handle)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_env_interrupt_handle) == 4U,
+        __autogen_confused_about_alignof_env_interrupt_handle)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_env_interrupt_list) == 24U,
+        __autogen_confused_about_sizeof_env_interrupt_list)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_env_interrupt_list) == 8U,
+        __autogen_confused_about_alignof_env_interrupt_list)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_env_profile_cell) == 48U,
+        __autogen_confused_about_sizeof_env_profile_cell)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_env_profile_cell) == 8U,
+        __autogen_confused_about_alignof_env_profile_cell)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_env_profile_cpu) == 4U,
+        __autogen_confused_about_sizeof_env_profile_cpu)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_env_profile_cpu) == 4U,
+        __autogen_confused_about_alignof_env_profile_cpu)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_env_profile_table) == 16U,
+        __autogen_confused_about_sizeof_env_profile_table)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_env_profile_table) == 8U,
+        __autogen_confused_about_alignof_env_profile_table)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_env_segment) == 24U,
+        __autogen_confused_about_sizeof_env_segment)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_env_segment) == 8U,
+        __autogen_confused_about_alignof_env_segment)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_env_segment_table) == 8U,
+        __autogen_confused_about_sizeof_env_segment_table)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_env_segment_table) == 8U,
+        __autogen_confused_about_alignof_env_segment_table)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_error_t) == 4U,
+        __autogen_confused_about_sizeof_error_t)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_error_t) == 4U,
+        __autogen_confused_about_alignof_error_t)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_firmware_segment) == 32U,
+        __autogen_confused_about_sizeof_firmware_segment)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_firmware_segment) == 8U,
+        __autogen_confused_about_alignof_firmware_segment)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_firmware_segments_info) == 8U,
+        __autogen_confused_about_sizeof_firmware_segments_info)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_firmware_segments_info) == 8U,
+        __autogen_confused_about_alignof_firmware_segments_info)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_gicd_icfgr_t) == 4U,
+        __autogen_confused_about_sizeof_gicd_icfgr)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_gicd_icfgr_t) == 4U,
+        __autogen_confused_about_alignof_gicd_icfgr)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_sgi_target_t) == 4U,
+        __autogen_confused_about_sizeof_sgi_target)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_sgi_target_t) == 4U,
+        __autogen_confused_about_alignof_sgi_target)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_gicd_sgir_t) == 4U,
+        __autogen_confused_about_sizeof_gicd_sgir)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_gicd_sgir_t) == 4U,
+        __autogen_confused_about_alignof_gicd_sgir)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_kmmu) == 4U,
+        __autogen_confused_about_sizeof_kmmu)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_kmmu) == 4U,
+        __autogen_confused_about_alignof_kmmu)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_ksp_user_agent) == 8U,
+        __autogen_confused_about_sizeof_ksp_user_agent)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_ksp_user_agent) == 4U,
+        __autogen_confused_about_alignof_ksp_user_agent)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_pipe_data) == 8U,
+        __autogen_confused_about_sizeof_pipe_data)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_pipe_data) == 4U,
+        __autogen_confused_about_alignof_pipe_data)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_pipe_ep_data) == 16U,
+        __autogen_confused_about_sizeof_pipe_ep_data)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_pipe_ep_data) == 4U,
+        __autogen_confused_about_alignof_pipe_ep_data)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_link_role_t) == 4U,
+        __autogen_confused_about_sizeof_link_role)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_link_role_t) == 4U,
+        __autogen_confused_about_alignof_link_role)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_link_transport_type_t) == 4U,
+        __autogen_confused_about_sizeof_link_transport_type)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_link_transport_type_t) == 4U,
+        __autogen_confused_about_alignof_link_transport_type)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_link) == 80U,
+        __autogen_confused_about_sizeof_link)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_link) == 8U,
+        __autogen_confused_about_alignof_link)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_links) == 8U,
+        __autogen_confused_about_sizeof_links)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_links) == 8U,
+        __autogen_confused_about_alignof_links)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_machine_info) == 24U,
+        __autogen_confused_about_sizeof_machine_info)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_machine_info) == 8U,
+        __autogen_confused_about_alignof_machine_info)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_merged_physpool) == 16U,
+        __autogen_confused_about_sizeof_merged_physpool)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_merged_physpool) == 8U,
+        __autogen_confused_about_alignof_merged_physpool)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_microvisor_timer) == 8U,
+        __autogen_confused_about_sizeof_microvisor_timer)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_microvisor_timer) == 4U,
+        __autogen_confused_about_alignof_microvisor_timer)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_mmu_lookup_index_t) == 4U,
+        __autogen_confused_about_sizeof_mmu_lookup_index)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_mmu_lookup_index_t) == 4U,
+        __autogen_confused_about_alignof_mmu_lookup_index)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_mmu_lookup_size_t) == 8U,
+        __autogen_confused_about_sizeof_mmu_lookup_size)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_mmu_lookup_size_t) == 8U,
+        __autogen_confused_about_alignof_mmu_lookup_size)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(_okl4_page_attribute_t) == 4U,
+        __autogen_confused_about_sizeof_page_attribute)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(_okl4_page_attribute_t) == 4U,
+        __autogen_confused_about_alignof_page_attribute)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_pipe_control_t) == 1U,
+        __autogen_confused_about_sizeof_pipe_control)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_pipe_control_t) == 1U,
+        __autogen_confused_about_alignof_pipe_control)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_pipe_state_t) == 1U,
+        __autogen_confused_about_sizeof_pipe_state)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_pipe_state_t) == 1U,
+        __autogen_confused_about_alignof_pipe_state)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_register_set_t) == 4U,
+        __autogen_confused_about_sizeof_register_set)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_register_set_t) == 4U,
+        __autogen_confused_about_alignof_register_set)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_register_and_set_t) == 4U,
+        __autogen_confused_about_sizeof_register_and_set)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_register_and_set_t) == 4U,
+        __autogen_confused_about_alignof_register_and_set)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_cpu_registers) == 448U,
+        __autogen_confused_about_sizeof_registers)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_cpu_registers) == 8U,
+        __autogen_confused_about_alignof_registers)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_cpu_registers_t) == 448U,
+        __autogen_confused_about_sizeof_registers_t)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_cpu_registers_t) == 8U,
+        __autogen_confused_about_alignof_registers_t)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_schedule_profile_data) == 32U,
+        __autogen_confused_about_sizeof_schedule_profile_data)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_schedule_profile_data) == 8U,
+        __autogen_confused_about_alignof_schedule_profile_data)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_scheduler_virq_flags_t) == 8U,
+        __autogen_confused_about_sizeof_scheduler_virq_flags)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_scheduler_virq_flags_t) == 8U,
+        __autogen_confused_about_alignof_scheduler_virq_flags)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_sdk_version_t) == 4U,
+        __autogen_confused_about_sizeof_sdk_version)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_sdk_version_t) == 4U,
+        __autogen_confused_about_alignof_sdk_version)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_shared_buffer) == 32U,
+        __autogen_confused_about_sizeof_shared_buffer)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_shared_buffer) == 8U,
+        __autogen_confused_about_alignof_shared_buffer)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_shared_buffers_array) == 16U,
+        __autogen_confused_about_sizeof_shared_buffers_array)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_shared_buffers_array) == 8U,
+        __autogen_confused_about_alignof_shared_buffers_array)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_timer_flags_t) == 4U,
+        __autogen_confused_about_sizeof_timer_flags)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_timer_flags_t) == 4U,
+        __autogen_confused_about_alignof_timer_flags)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct _okl4_tracebuffer_buffer_header) == 40U,
+        __autogen_confused_about_sizeof_tracebuffer_buffer_header)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct _okl4_tracebuffer_buffer_header) == 8U,
+        __autogen_confused_about_alignof_tracebuffer_buffer_header)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_tracebuffer_env) == 24U,
+        __autogen_confused_about_sizeof_tracebuffer_env)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_tracebuffer_env) == 8U,
+        __autogen_confused_about_alignof_tracebuffer_env)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct _okl4_tracebuffer_header) == 40U,
+        __autogen_confused_about_sizeof_tracebuffer_header)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct _okl4_tracebuffer_header) == 8U,
+        __autogen_confused_about_alignof_tracebuffer_header)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_tracepoint_class_t) == 4U,
+        __autogen_confused_about_sizeof_tracepoint_class)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_tracepoint_class_t) == 4U,
+        __autogen_confused_about_alignof_tracepoint_class)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(_okl4_tracepoint_desc_t) == 4U,
+        __autogen_confused_about_sizeof_tracepoint_desc)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(_okl4_tracepoint_desc_t) == 4U,
+        __autogen_confused_about_alignof_tracepoint_desc)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(_okl4_tracepoint_masks_t) == 4U,
+        __autogen_confused_about_sizeof_tracepoint_masks)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(_okl4_tracepoint_masks_t) == 4U,
+        __autogen_confused_about_alignof_tracepoint_masks)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_tracepoint_entry_base) == 12U,
+        __autogen_confused_about_sizeof_tracepoint_entry_base)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_tracepoint_entry_base) == 4U,
+        __autogen_confused_about_alignof_tracepoint_entry_base)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_tracepoint_evt_t) == 4U,
+        __autogen_confused_about_sizeof_tracepoint_evt)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_tracepoint_evt_t) == 4U,
+        __autogen_confused_about_alignof_tracepoint_evt)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_tracepoint_level_t) == 4U,
+        __autogen_confused_about_sizeof_tracepoint_level)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_tracepoint_level_t) == 4U,
+        __autogen_confused_about_alignof_tracepoint_level)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_tracepoint_subsystem_t) == 4U,
+        __autogen_confused_about_sizeof_tracepoint_subsystem)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_tracepoint_subsystem_t) == 4U,
+        __autogen_confused_about_alignof_tracepoint_subsystem)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_tracepoint_unpacked_entry) == 12U,
+        __autogen_confused_about_sizeof_tracepoint_unpacked_entry)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_tracepoint_unpacked_entry) == 4U,
+        __autogen_confused_about_alignof_tracepoint_unpacked_entry)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_vclient_info) == 32U,
+        __autogen_confused_about_sizeof_vclient_info)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_vclient_info) == 8U,
+        __autogen_confused_about_alignof_vclient_info)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_vcpu_entry) == 24U,
+        __autogen_confused_about_sizeof_vcpu_entry)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_vcpu_entry) == 8U,
+        __autogen_confused_about_alignof_vcpu_entry)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_vcpu_table) == 16U,
+        __autogen_confused_about_sizeof_vcpu_table)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_vcpu_table) == 8U,
+        __autogen_confused_about_alignof_vcpu_table)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_vfp_ctrl_registers) == 8U,
+        __autogen_confused_about_sizeof_vfp_ctrl_registers)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_vfp_ctrl_registers) == 4U,
+        __autogen_confused_about_alignof_vfp_ctrl_registers)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_vfp_ctrl_registers_t) == 8U,
+        __autogen_confused_about_sizeof_vfp_ctrl_registers_t)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_vfp_ctrl_registers_t) == 4U,
+        __autogen_confused_about_alignof_vfp_ctrl_registers_t)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_vfp_ops_t) == 4U,
+        __autogen_confused_about_sizeof_vfp_ops)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_vfp_ops_t) == 4U,
+        __autogen_confused_about_alignof_vfp_ops)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_vfp_register) == 16U,
+        __autogen_confused_about_sizeof_vfp_register)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_vfp_register) == 16U,
+        __autogen_confused_about_alignof_vfp_register)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_vfp_register_t) == 16U,
+        __autogen_confused_about_sizeof_vfp_register_t)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_vfp_register_t) == 16U,
+        __autogen_confused_about_alignof_vfp_register_t)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_vfp_registers) == 528U,
+        __autogen_confused_about_sizeof_vfp_registers)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_vfp_registers) == 16U,
+        __autogen_confused_about_alignof_vfp_registers)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_vfp_registers_t) == 528U,
+        __autogen_confused_about_sizeof_vfp_registers_t)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_vfp_registers_t) == 16U,
+        __autogen_confused_about_alignof_vfp_registers_t)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_virtmem_pool) == 16U,
+        __autogen_confused_about_sizeof_virtmem_pool)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_virtmem_pool) == 8U,
+        __autogen_confused_about_alignof_virtmem_pool)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_virtual_interrupt_lines) == 16U,
+        __autogen_confused_about_sizeof_virtual_interrupt_lines)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_virtual_interrupt_lines) == 8U,
+        __autogen_confused_about_alignof_virtual_interrupt_lines)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_vserver_info) == 32U,
+        __autogen_confused_about_sizeof_vserver_info)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_vserver_info) == 8U,
+        __autogen_confused_about_alignof_vserver_info)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_vservices_service_descriptor) == 24U,
+        __autogen_confused_about_sizeof_vservices_service_descriptor)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_vservices_service_descriptor) == 8U,
+        __autogen_confused_about_alignof_vservices_service_descriptor)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_vservices_transport_type_t) == 4U,
+        __autogen_confused_about_sizeof_vservices_transport_type)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_vservices_transport_type_t) == 4U,
+        __autogen_confused_about_alignof_vservices_transport_type)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_vservices_transport_microvisor) == 120U,
+        __autogen_confused_about_sizeof_vservices_transport_microvisor)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_vservices_transport_microvisor) == 8U,
+        __autogen_confused_about_alignof_vservices_transport_microvisor)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_vservices_transports) == 16U,
+        __autogen_confused_about_sizeof_vservices_transports)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_vservices_transports) == 8U,
+        __autogen_confused_about_alignof_vservices_transports)
+#endif
+
+#else
+
+/**
+ *  okl4_arm_mpidr_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_AFF0_ARM_MPIDR) */
+#define OKL4_ASM_MASK_AFF0_ARM_MPIDR (255)
+/*lint -esym(621, OKL4_ASM_SHIFT_AFF0_ARM_MPIDR) */
+#define OKL4_ASM_SHIFT_AFF0_ARM_MPIDR (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_AFF0_ARM_MPIDR) */
+#define OKL4_ASM_WIDTH_AFF0_ARM_MPIDR (8)
+/*lint -esym(621, OKL4_ASM_MASK_AFF1_ARM_MPIDR) */
+#define OKL4_ASM_MASK_AFF1_ARM_MPIDR (255 << 8)
+/*lint -esym(621, OKL4_ASM_SHIFT_AFF1_ARM_MPIDR) */
+#define OKL4_ASM_SHIFT_AFF1_ARM_MPIDR (8)
+/*lint -esym(621, OKL4_ASM_WIDTH_AFF1_ARM_MPIDR) */
+#define OKL4_ASM_WIDTH_AFF1_ARM_MPIDR (8)
+/*lint -esym(621, OKL4_ASM_MASK_AFF2_ARM_MPIDR) */
+#define OKL4_ASM_MASK_AFF2_ARM_MPIDR (255 << 16)
+/*lint -esym(621, OKL4_ASM_SHIFT_AFF2_ARM_MPIDR) */
+#define OKL4_ASM_SHIFT_AFF2_ARM_MPIDR (16)
+/*lint -esym(621, OKL4_ASM_WIDTH_AFF2_ARM_MPIDR) */
+#define OKL4_ASM_WIDTH_AFF2_ARM_MPIDR (8)
+/*lint -esym(621, OKL4_ASM_MASK_MT_ARM_MPIDR) */
+#define OKL4_ASM_MASK_MT_ARM_MPIDR (1 << 24)
+/*lint -esym(621, OKL4_ASM_SHIFT_MT_ARM_MPIDR) */
+#define OKL4_ASM_SHIFT_MT_ARM_MPIDR (24)
+/*lint -esym(621, OKL4_ASM_WIDTH_MT_ARM_MPIDR) */
+#define OKL4_ASM_WIDTH_MT_ARM_MPIDR (1)
+/*lint -esym(621, OKL4_ASM_MASK_U_ARM_MPIDR) */
+#define OKL4_ASM_MASK_U_ARM_MPIDR (1 << 30)
+/*lint -esym(621, OKL4_ASM_SHIFT_U_ARM_MPIDR) */
+#define OKL4_ASM_SHIFT_U_ARM_MPIDR (30)
+/*lint -esym(621, OKL4_ASM_WIDTH_U_ARM_MPIDR) */
+#define OKL4_ASM_WIDTH_U_ARM_MPIDR (1)
+/*lint -esym(621, OKL4_ASM_MASK_MP_ARM_MPIDR) */
+#define OKL4_ASM_MASK_MP_ARM_MPIDR (1 << 31)
+/*lint -esym(621, OKL4_ASM_SHIFT_MP_ARM_MPIDR) */
+#define OKL4_ASM_SHIFT_MP_ARM_MPIDR (31)
+/*lint -esym(621, OKL4_ASM_WIDTH_MP_ARM_MPIDR) */
+#define OKL4_ASM_WIDTH_MP_ARM_MPIDR (1)
+/*lint -esym(621, OKL4_ASM_MASK_AFF3_ARM_MPIDR) */
+#define OKL4_ASM_MASK_AFF3_ARM_MPIDR (255 << 32)
+/*lint -esym(621, OKL4_ASM_SHIFT_AFF3_ARM_MPIDR) */
+#define OKL4_ASM_SHIFT_AFF3_ARM_MPIDR (32)
+/*lint -esym(621, OKL4_ASM_WIDTH_AFF3_ARM_MPIDR) */
+#define OKL4_ASM_WIDTH_AFF3_ARM_MPIDR (8)
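+
+/*
+ * Illustrative use of the field accessors above: a field is isolated with
+ * its MASK and shifted down by its SHIFT, e.g. Aff1 of an mpidr value is
+ * ((mpidr & OKL4_ASM_MASK_AFF1_ARM_MPIDR) >> OKL4_ASM_SHIFT_AFF1_ARM_MPIDR).
+ */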
+
+
+/**
+ *  uint32_t
+ **/
+/*lint -esym(621, OKL4_AXON_NUM_RECEIVE_QUEUES) */
+#define OKL4_AXON_NUM_RECEIVE_QUEUES (4)
+
+/*lint -esym(621, OKL4_AXON_NUM_SEND_QUEUES) */
+#define OKL4_AXON_NUM_SEND_QUEUES (4)
+
+/*lint -esym(621, _OKL4_POISON) */
+#define _OKL4_POISON (3735928559)
+
+/*lint -esym(621, OKL4_TRACEBUFFER_INVALID_REF) */
+#define OKL4_TRACEBUFFER_INVALID_REF (-1)
+
+/**
+ *  okl4_arm_psci_function_t
+ **/
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_PSCI_VERSION) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_PSCI_VERSION (0x0)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_CPU_SUSPEND) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_CPU_SUSPEND (0x1)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_CPU_OFF) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_CPU_OFF (0x2)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_CPU_ON) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_CPU_ON (0x3)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_AFFINITY_INFO) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_AFFINITY_INFO (0x4)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_MIGRATE) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_MIGRATE (0x5)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_MIGRATE_INFO_TYPE) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_MIGRATE_INFO_TYPE (0x6)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_MIGRATE_INFO_UP_CPU) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_MIGRATE_INFO_UP_CPU (0x7)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_SYSTEM_OFF) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_SYSTEM_OFF (0x8)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_SYSTEM_RESET) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_SYSTEM_RESET (0x9)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_PSCI_FEATURES) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_PSCI_FEATURES (0xa)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_CPU_FREEZE) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_CPU_FREEZE (0xb)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_CPU_DEFAULT_SUSPEND) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_CPU_DEFAULT_SUSPEND (0xc)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_NODE_HW_STATE) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_NODE_HW_STATE (0xd)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_SYSTEM_SUSPEND) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_SYSTEM_SUSPEND (0xe)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_PSCI_SET_SUSPEND_MODE) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_PSCI_SET_SUSPEND_MODE (0xf)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_PSCI_STAT_RESIDENCY) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_PSCI_STAT_RESIDENCY (0x10)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_PSCI_STAT_COUNT) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_PSCI_STAT_COUNT (0x11)
+
+/**
+ *  okl4_arm_psci_result_t
+ **/
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_RESULT_SUCCESS) */
+#define OKL4_ASM_ARM_PSCI_RESULT_SUCCESS (0x0)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_RESULT_INVALID_ADDRESS) */
+#define OKL4_ASM_ARM_PSCI_RESULT_INVALID_ADDRESS (0xfffffff7)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_RESULT_DISABLED) */
+#define OKL4_ASM_ARM_PSCI_RESULT_DISABLED (0xfffffff8)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_RESULT_NOT_PRESENT) */
+#define OKL4_ASM_ARM_PSCI_RESULT_NOT_PRESENT (0xfffffff9)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_RESULT_INTERNAL_FAILURE) */
+#define OKL4_ASM_ARM_PSCI_RESULT_INTERNAL_FAILURE (0xfffffffa)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_RESULT_ON_PENDING) */
+#define OKL4_ASM_ARM_PSCI_RESULT_ON_PENDING (0xfffffffb)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_RESULT_ALREADY_ON) */
+#define OKL4_ASM_ARM_PSCI_RESULT_ALREADY_ON (0xfffffffc)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_RESULT_DENIED) */
+#define OKL4_ASM_ARM_PSCI_RESULT_DENIED (0xfffffffd)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_RESULT_INVALID_PARAMETERS) */
+#define OKL4_ASM_ARM_PSCI_RESULT_INVALID_PARAMETERS (0xfffffffe)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_RESULT_NOT_SUPPORTED) */
+#define OKL4_ASM_ARM_PSCI_RESULT_NOT_SUPPORTED (0xffffffff)
+
+/**
+ *  okl4_arm_psci_suspend_state_t
+ **/
+
+/*lint -esym(621, OKL4_ARM_PSCI_POWER_LEVEL_CPU) */
+#define OKL4_ARM_PSCI_POWER_LEVEL_CPU (0)
+
+/*lint -esym(621, OKL4_ASM_MASK_STATE_ID_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_ASM_MASK_STATE_ID_ARM_PSCI_SUSPEND_STATE (65535)
+/*lint -esym(621, OKL4_ASM_SHIFT_STATE_ID_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_ASM_SHIFT_STATE_ID_ARM_PSCI_SUSPEND_STATE (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_STATE_ID_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_ASM_WIDTH_STATE_ID_ARM_PSCI_SUSPEND_STATE (16)
+/*lint -esym(621, OKL4_ASM_MASK_POWER_DOWN_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_ASM_MASK_POWER_DOWN_ARM_PSCI_SUSPEND_STATE (1 << 16)
+/*lint -esym(621, OKL4_ASM_SHIFT_POWER_DOWN_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_ASM_SHIFT_POWER_DOWN_ARM_PSCI_SUSPEND_STATE (16)
+/*lint -esym(621, OKL4_ASM_WIDTH_POWER_DOWN_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_ASM_WIDTH_POWER_DOWN_ARM_PSCI_SUSPEND_STATE (1)
+/*lint -esym(621, OKL4_ASM_MASK_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_ASM_MASK_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE (3 << 24)
+/*lint -esym(621, OKL4_ASM_SHIFT_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_ASM_SHIFT_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE (24)
+/*lint -esym(621, OKL4_ASM_WIDTH_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_ASM_WIDTH_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE (2)
+
+
+/**
+ *  okl4_arm_sctlr_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_MMU_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_MASK_MMU_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_SHIFT_MMU_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_MMU_ENABLE_ARM_SCTLR (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_MMU_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_MMU_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_MASK_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR (1 << 1)
+/*lint -esym(621, OKL4_ASM_SHIFT_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_WIDTH_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_DATA_CACHE_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_MASK_DATA_CACHE_ENABLE_ARM_SCTLR (1 << 2)
+/*lint -esym(621, OKL4_ASM_SHIFT_DATA_CACHE_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_DATA_CACHE_ENABLE_ARM_SCTLR (2)
+/*lint -esym(621, OKL4_ASM_WIDTH_DATA_CACHE_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_DATA_CACHE_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_STACK_ALIGN_ARM_SCTLR) */
+#define OKL4_ASM_MASK_STACK_ALIGN_ARM_SCTLR (1 << 3)
+/*lint -esym(621, OKL4_ASM_SHIFT_STACK_ALIGN_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_STACK_ALIGN_ARM_SCTLR (3)
+/*lint -esym(621, OKL4_ASM_WIDTH_STACK_ALIGN_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_STACK_ALIGN_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_STACK_ALIGN_EL0_ARM_SCTLR) */
+#define OKL4_ASM_MASK_STACK_ALIGN_EL0_ARM_SCTLR (1 << 4)
+/*lint -esym(621, OKL4_ASM_SHIFT_STACK_ALIGN_EL0_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_STACK_ALIGN_EL0_ARM_SCTLR (4)
+/*lint -esym(621, OKL4_ASM_WIDTH_STACK_ALIGN_EL0_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_STACK_ALIGN_EL0_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_CP15_BARRIER_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_MASK_CP15_BARRIER_ENABLE_ARM_SCTLR (1 << 5)
+/*lint -esym(621, OKL4_ASM_SHIFT_CP15_BARRIER_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_CP15_BARRIER_ENABLE_ARM_SCTLR (5)
+/*lint -esym(621, OKL4_ASM_WIDTH_CP15_BARRIER_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_CP15_BARRIER_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_OKL_HCR_EL2_DC_ARM_SCTLR) */
+#define OKL4_ASM_MASK_OKL_HCR_EL2_DC_ARM_SCTLR (1 << 6)
+/*lint -esym(621, OKL4_ASM_SHIFT_OKL_HCR_EL2_DC_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_OKL_HCR_EL2_DC_ARM_SCTLR (6)
+/*lint -esym(621, OKL4_ASM_WIDTH_OKL_HCR_EL2_DC_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_OKL_HCR_EL2_DC_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_IT_DISABLE_ARM_SCTLR) */
+#define OKL4_ASM_MASK_IT_DISABLE_ARM_SCTLR (1 << 7)
+/*lint -esym(621, OKL4_ASM_SHIFT_IT_DISABLE_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_IT_DISABLE_ARM_SCTLR (7)
+/*lint -esym(621, OKL4_ASM_WIDTH_IT_DISABLE_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_IT_DISABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_SETEND_DISABLE_ARM_SCTLR) */
+#define OKL4_ASM_MASK_SETEND_DISABLE_ARM_SCTLR (1 << 8)
+/*lint -esym(621, OKL4_ASM_SHIFT_SETEND_DISABLE_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_SETEND_DISABLE_ARM_SCTLR (8)
+/*lint -esym(621, OKL4_ASM_WIDTH_SETEND_DISABLE_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_SETEND_DISABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_USER_MASK_ACCESS_ARM_SCTLR) */
+#define OKL4_ASM_MASK_USER_MASK_ACCESS_ARM_SCTLR (1 << 9)
+/*lint -esym(621, OKL4_ASM_SHIFT_USER_MASK_ACCESS_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_USER_MASK_ACCESS_ARM_SCTLR (9)
+/*lint -esym(621, OKL4_ASM_WIDTH_USER_MASK_ACCESS_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_USER_MASK_ACCESS_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_RESERVED11_ARM_SCTLR) */
+#define OKL4_ASM_MASK_RESERVED11_ARM_SCTLR (1 << 11)
+/*lint -esym(621, OKL4_ASM_SHIFT_RESERVED11_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_RESERVED11_ARM_SCTLR (11)
+/*lint -esym(621, OKL4_ASM_WIDTH_RESERVED11_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_RESERVED11_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_MASK_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR (1 << 12)
+/*lint -esym(621, OKL4_ASM_SHIFT_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR (12)
+/*lint -esym(621, OKL4_ASM_WIDTH_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_VECTORS_BIT_ARM_SCTLR) */
+#define OKL4_ASM_MASK_VECTORS_BIT_ARM_SCTLR (1 << 13)
+/*lint -esym(621, OKL4_ASM_SHIFT_VECTORS_BIT_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_VECTORS_BIT_ARM_SCTLR (13)
+/*lint -esym(621, OKL4_ASM_WIDTH_VECTORS_BIT_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_VECTORS_BIT_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_DCACHE_ZERO_ARM_SCTLR) */
+#define OKL4_ASM_MASK_DCACHE_ZERO_ARM_SCTLR (1 << 14)
+/*lint -esym(621, OKL4_ASM_SHIFT_DCACHE_ZERO_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_DCACHE_ZERO_ARM_SCTLR (14)
+/*lint -esym(621, OKL4_ASM_WIDTH_DCACHE_ZERO_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_DCACHE_ZERO_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_USER_CACHE_TYPE_ARM_SCTLR) */
+#define OKL4_ASM_MASK_USER_CACHE_TYPE_ARM_SCTLR (1 << 15)
+/*lint -esym(621, OKL4_ASM_SHIFT_USER_CACHE_TYPE_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_USER_CACHE_TYPE_ARM_SCTLR (15)
+/*lint -esym(621, OKL4_ASM_WIDTH_USER_CACHE_TYPE_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_USER_CACHE_TYPE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_NO_TRAP_WFI_ARM_SCTLR) */
+#define OKL4_ASM_MASK_NO_TRAP_WFI_ARM_SCTLR (1 << 16)
+/*lint -esym(621, OKL4_ASM_SHIFT_NO_TRAP_WFI_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_NO_TRAP_WFI_ARM_SCTLR (16)
+/*lint -esym(621, OKL4_ASM_WIDTH_NO_TRAP_WFI_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_NO_TRAP_WFI_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_NO_TRAP_WFE_ARM_SCTLR) */
+#define OKL4_ASM_MASK_NO_TRAP_WFE_ARM_SCTLR (1 << 18)
+/*lint -esym(621, OKL4_ASM_SHIFT_NO_TRAP_WFE_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_NO_TRAP_WFE_ARM_SCTLR (18)
+/*lint -esym(621, OKL4_ASM_WIDTH_NO_TRAP_WFE_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_NO_TRAP_WFE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_WRITE_EXEC_NEVER_ARM_SCTLR) */
+#define OKL4_ASM_MASK_WRITE_EXEC_NEVER_ARM_SCTLR (1 << 19)
+/*lint -esym(621, OKL4_ASM_SHIFT_WRITE_EXEC_NEVER_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_WRITE_EXEC_NEVER_ARM_SCTLR (19)
+/*lint -esym(621, OKL4_ASM_WIDTH_WRITE_EXEC_NEVER_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_WRITE_EXEC_NEVER_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_USER_WRITE_EXEC_NEVER_ARM_SCTLR) */
+#define OKL4_ASM_MASK_USER_WRITE_EXEC_NEVER_ARM_SCTLR (1 << 20)
+/*lint -esym(621, OKL4_ASM_SHIFT_USER_WRITE_EXEC_NEVER_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_USER_WRITE_EXEC_NEVER_ARM_SCTLR (20)
+/*lint -esym(621, OKL4_ASM_WIDTH_USER_WRITE_EXEC_NEVER_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_USER_WRITE_EXEC_NEVER_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_RESERVED22_ARM_SCTLR) */
+#define OKL4_ASM_MASK_RESERVED22_ARM_SCTLR (1 << 22)
+/*lint -esym(621, OKL4_ASM_SHIFT_RESERVED22_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_RESERVED22_ARM_SCTLR (22)
+/*lint -esym(621, OKL4_ASM_WIDTH_RESERVED22_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_RESERVED22_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_RESERVED23_ARM_SCTLR) */
+#define OKL4_ASM_MASK_RESERVED23_ARM_SCTLR (1 << 23)
+/*lint -esym(621, OKL4_ASM_SHIFT_RESERVED23_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_RESERVED23_ARM_SCTLR (23)
+/*lint -esym(621, OKL4_ASM_WIDTH_RESERVED23_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_RESERVED23_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_EL0_ENDIANNESS_ARM_SCTLR) */
+#define OKL4_ASM_MASK_EL0_ENDIANNESS_ARM_SCTLR (1 << 24)
+/*lint -esym(621, OKL4_ASM_SHIFT_EL0_ENDIANNESS_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_EL0_ENDIANNESS_ARM_SCTLR (24)
+/*lint -esym(621, OKL4_ASM_WIDTH_EL0_ENDIANNESS_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_EL0_ENDIANNESS_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_EXCEPTION_ENDIANNESS_ARM_SCTLR) */
+#define OKL4_ASM_MASK_EXCEPTION_ENDIANNESS_ARM_SCTLR (1 << 25)
+/*lint -esym(621, OKL4_ASM_SHIFT_EXCEPTION_ENDIANNESS_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_EXCEPTION_ENDIANNESS_ARM_SCTLR (25)
+/*lint -esym(621, OKL4_ASM_WIDTH_EXCEPTION_ENDIANNESS_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_EXCEPTION_ENDIANNESS_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_TEX_REMAP_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_MASK_TEX_REMAP_ENABLE_ARM_SCTLR (1 << 28)
+/*lint -esym(621, OKL4_ASM_SHIFT_TEX_REMAP_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_TEX_REMAP_ENABLE_ARM_SCTLR (28)
+/*lint -esym(621, OKL4_ASM_WIDTH_TEX_REMAP_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_TEX_REMAP_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_ACCESS_FLAG_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_MASK_ACCESS_FLAG_ENABLE_ARM_SCTLR (1 << 29)
+/*lint -esym(621, OKL4_ASM_SHIFT_ACCESS_FLAG_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_ACCESS_FLAG_ENABLE_ARM_SCTLR (29)
+/*lint -esym(621, OKL4_ASM_WIDTH_ACCESS_FLAG_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_ACCESS_FLAG_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_THUMB_EXCEPTION_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_MASK_THUMB_EXCEPTION_ENABLE_ARM_SCTLR (1 << 30)
+/*lint -esym(621, OKL4_ASM_SHIFT_THUMB_EXCEPTION_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_THUMB_EXCEPTION_ENABLE_ARM_SCTLR (30)
+/*lint -esym(621, OKL4_ASM_WIDTH_THUMB_EXCEPTION_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_THUMB_EXCEPTION_ENABLE_ARM_SCTLR (1)
+
+
+/**
+ *  okl4_arm_smccc_arch_function_t
+ **/
+/*lint -esym(621, OKL4_ASM_ARM_SMCCC_ARCH_FUNCTION_SMCCC_VERSION) */
+#define OKL4_ASM_ARM_SMCCC_ARCH_FUNCTION_SMCCC_VERSION (0x0)
+/*lint -esym(621, OKL4_ASM_ARM_SMCCC_ARCH_FUNCTION_ARCH_FEATURES) */
+#define OKL4_ASM_ARM_SMCCC_ARCH_FUNCTION_ARCH_FEATURES (0x1)
+/*lint -esym(621, OKL4_ASM_ARM_SMCCC_ARCH_FUNCTION_ARCH_WORKAROUND_1) */
+#define OKL4_ASM_ARM_SMCCC_ARCH_FUNCTION_ARCH_WORKAROUND_1 (0x8000)
+
+/**
+ *  okl4_arm_smccc_result_t
+ **/
+/*lint -esym(621, OKL4_ASM_ARM_SMCCC_RESULT_SUCCESS) */
+#define OKL4_ASM_ARM_SMCCC_RESULT_SUCCESS (0x0)
+/*lint -esym(621, OKL4_ASM_ARM_SMCCC_RESULT_NOT_SUPPORTED) */
+#define OKL4_ASM_ARM_SMCCC_RESULT_NOT_SUPPORTED (0xffffffff)
+
+/**
+ *  okl4_count_t
+ **/
+/*lint -esym(621, OKL4_DEFAULT_PAGEBITS) */
+#define OKL4_DEFAULT_PAGEBITS (12)
+
+/** The maximum limit for the segment index returned by mmu_lookup_segment. */
+/*lint -esym(621, OKL4_KMMU_LOOKUP_PAGE_SEGMENT_MASK) */
+#define OKL4_KMMU_LOOKUP_PAGE_SEGMENT_MASK (1023)
+
+/** The maximum limit for segment attachments to a KMMU. */
+/*lint -esym(621, OKL4_KMMU_MAX_SEGMENTS) */
+#define OKL4_KMMU_MAX_SEGMENTS (256)
+
+/*lint -esym(621, OKL4_PROFILE_NO_PCPUS) */
+#define OKL4_PROFILE_NO_PCPUS (-1)
+
+/**
+ *  okl4_kcap_t
+ **/
+/*lint -esym(621, OKL4_KCAP_INVALID) */
+#define OKL4_KCAP_INVALID (-1)
+
+/**
+ *  okl4_interrupt_number_t
+ **/
+/*lint -esym(621, OKL4_INTERRUPT_INVALID_IRQ) */
+#define OKL4_INTERRUPT_INVALID_IRQ (1023)
+
+/*lint -esym(621, OKL4_INVALID_VIRQ) */
+#define OKL4_INVALID_VIRQ (1023)
+
+/**
+ *  okl4_lsize_t
+ **/
+/*lint -esym(621, OKL4_DEFAULT_PAGESIZE) */
+#define OKL4_DEFAULT_PAGESIZE (4096)
+
+/**
+ *  okl4_laddr_t
+ **/
+/*lint -esym(621, OKL4_USER_AREA_END) */
+#define OKL4_USER_AREA_END (17592186044416)
+
+/**
+ *  okl4_axon_data_info_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_PENDING_AXON_DATA_INFO) */
+#define OKL4_ASM_MASK_PENDING_AXON_DATA_INFO (1)
+/*lint -esym(621, OKL4_ASM_SHIFT_PENDING_AXON_DATA_INFO) */
+#define OKL4_ASM_SHIFT_PENDING_AXON_DATA_INFO (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_PENDING_AXON_DATA_INFO) */
+#define OKL4_ASM_WIDTH_PENDING_AXON_DATA_INFO (1)
+/*lint -esym(621, OKL4_ASM_MASK_FAILURE_AXON_DATA_INFO) */
+#define OKL4_ASM_MASK_FAILURE_AXON_DATA_INFO (1 << 1)
+/*lint -esym(621, OKL4_ASM_SHIFT_FAILURE_AXON_DATA_INFO) */
+#define OKL4_ASM_SHIFT_FAILURE_AXON_DATA_INFO (1)
+/*lint -esym(621, OKL4_ASM_WIDTH_FAILURE_AXON_DATA_INFO) */
+#define OKL4_ASM_WIDTH_FAILURE_AXON_DATA_INFO (1)
+/*lint -esym(621, OKL4_ASM_MASK_USR_AXON_DATA_INFO) */
+#define OKL4_ASM_MASK_USR_AXON_DATA_INFO (1 << 2)
+/*lint -esym(621, OKL4_ASM_SHIFT_USR_AXON_DATA_INFO) */
+#define OKL4_ASM_SHIFT_USR_AXON_DATA_INFO (2)
+/*lint -esym(621, OKL4_ASM_WIDTH_USR_AXON_DATA_INFO) */
+#define OKL4_ASM_WIDTH_USR_AXON_DATA_INFO (1)
+/*lint -esym(621, OKL4_ASM_MASK_LADDR_AXON_DATA_INFO) */
+#define OKL4_ASM_MASK_LADDR_AXON_DATA_INFO (2305843009213693951 << 3)
+/*lint -esym(621, OKL4_ASM_SHIFT_LADDR_AXON_DATA_INFO) */
+#define OKL4_ASM_SHIFT_LADDR_AXON_DATA_INFO (3)
+/*lint -esym(621, OKL4_ASM_PRESHIFT_LADDR_AXON_DATA_INFO) */
+#define OKL4_ASM_PRESHIFT_LADDR_AXON_DATA_INFO (3)
+/*lint -esym(621, OKL4_ASM_WIDTH_LADDR_AXON_DATA_INFO) */
+#define OKL4_ASM_WIDTH_LADDR_AXON_DATA_INFO (61)
+
+
+/**
+ *  okl4_axon_queue_size_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_ALLOC_ORDER_AXON_QUEUE_SIZE) */
+#define OKL4_ASM_MASK_ALLOC_ORDER_AXON_QUEUE_SIZE (31)
+/*lint -esym(621, OKL4_ASM_SHIFT_ALLOC_ORDER_AXON_QUEUE_SIZE) */
+#define OKL4_ASM_SHIFT_ALLOC_ORDER_AXON_QUEUE_SIZE (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_ALLOC_ORDER_AXON_QUEUE_SIZE) */
+#define OKL4_ASM_WIDTH_ALLOC_ORDER_AXON_QUEUE_SIZE (5)
+/*lint -esym(621, OKL4_ASM_MASK_MIN_ORDER_AXON_QUEUE_SIZE) */
+#define OKL4_ASM_MASK_MIN_ORDER_AXON_QUEUE_SIZE (31 << 8)
+/*lint -esym(621, OKL4_ASM_SHIFT_MIN_ORDER_AXON_QUEUE_SIZE) */
+#define OKL4_ASM_SHIFT_MIN_ORDER_AXON_QUEUE_SIZE (8)
+/*lint -esym(621, OKL4_ASM_WIDTH_MIN_ORDER_AXON_QUEUE_SIZE) */
+#define OKL4_ASM_WIDTH_MIN_ORDER_AXON_QUEUE_SIZE (5)
+
+
+/**
+ *  okl4_axon_virq_flags_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_READY_AXON_VIRQ_FLAGS) */
+#define OKL4_ASM_MASK_READY_AXON_VIRQ_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_SHIFT_READY_AXON_VIRQ_FLAGS) */
+#define OKL4_ASM_SHIFT_READY_AXON_VIRQ_FLAGS (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_READY_AXON_VIRQ_FLAGS) */
+#define OKL4_ASM_WIDTH_READY_AXON_VIRQ_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_MASK_FAULT_AXON_VIRQ_FLAGS) */
+#define OKL4_ASM_MASK_FAULT_AXON_VIRQ_FLAGS (1 << 1)
+/*lint -esym(621, OKL4_ASM_SHIFT_FAULT_AXON_VIRQ_FLAGS) */
+#define OKL4_ASM_SHIFT_FAULT_AXON_VIRQ_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_WIDTH_FAULT_AXON_VIRQ_FLAGS) */
+#define OKL4_ASM_WIDTH_FAULT_AXON_VIRQ_FLAGS (1)
+
+
+/**
+ *  okl4_page_cache_t
+ **/
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_WRITECOMBINE) */
+#define OKL4_ASM_PAGE_CACHE_WRITECOMBINE (0x0)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_DEFAULT) */
+#define OKL4_ASM_PAGE_CACHE_DEFAULT (0x1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_IPC_RX) */
+#define OKL4_ASM_PAGE_CACHE_IPC_RX (0x1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_IPC_TX) */
+#define OKL4_ASM_PAGE_CACHE_IPC_TX (0x1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_TRACEBUFFER) */
+#define OKL4_ASM_PAGE_CACHE_TRACEBUFFER (0x1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_WRITEBACK) */
+#define OKL4_ASM_PAGE_CACHE_WRITEBACK (0x1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_IWB_RWA_ONC) */
+#define OKL4_ASM_PAGE_CACHE_IWB_RWA_ONC (0x2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_WRITETHROUGH) */
+#define OKL4_ASM_PAGE_CACHE_WRITETHROUGH (0x3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_DEVICE_GRE) */
+#define OKL4_ASM_PAGE_CACHE_DEVICE_GRE (0x4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_DEVICE_NGRE) */
+#define OKL4_ASM_PAGE_CACHE_DEVICE_NGRE (0x5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_DEVICE) */
+#define OKL4_ASM_PAGE_CACHE_DEVICE (0x6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_STRONG) */
+#define OKL4_ASM_PAGE_CACHE_STRONG (0x7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_DEVICE_NGNRNE) */
+#define OKL4_ASM_PAGE_CACHE_HW_DEVICE_NGNRNE (0x8000000)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_MASK) */
+#define OKL4_ASM_PAGE_CACHE_HW_MASK (0x8000000)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_DEVICE_NGNRE) */
+#define OKL4_ASM_PAGE_CACHE_HW_DEVICE_NGNRE (0x8000004)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_DEVICE_NGRE) */
+#define OKL4_ASM_PAGE_CACHE_HW_DEVICE_NGRE (0x8000008)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_DEVICE_GRE) */
+#define OKL4_ASM_PAGE_CACHE_HW_DEVICE_GRE (0x800000c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWT_WA_NSH (0x8000011)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_NSH (0x8000012)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_NSH (0x8000013)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_WA_NSH (0x8000014)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_NSH (0x8000015)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_NSH (0x8000016)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_NSH (0x8000017)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_WA_NSH (0x8000018)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_WA_NSH (0x8000019)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_WA_NSH (0x800001a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_NSH (0x800001b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_WA_NSH (0x800001c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_WA_NSH (0x800001d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_WA_NSH (0x800001e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_NSH (0x800001f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_NSH (0x8000021)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWT_RA_NSH (0x8000022)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_NSH (0x8000023)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_RA_NSH (0x8000024)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_NSH (0x8000025)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_NSH (0x8000026)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_NSH (0x8000027)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_RA_NSH (0x8000028)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_RA_NSH (0x8000029)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_RA_NSH (0x800002a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_NSH (0x800002b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_RA_NSH (0x800002c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_RA_NSH (0x800002d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_RA_NSH (0x800002e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_NSH (0x800002f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_NSH (0x8000031)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_NSH (0x8000032)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWT_RWA_NSH (0x8000033)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_RWA_NSH (0x8000034)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_NSH (0x8000035)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_NSH (0x8000036)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_NSH (0x8000037)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_NSH (0x8000038)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_NSH (0x8000039)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_NSH (0x800003a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_NSH (0x800003b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_NSH (0x800003c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_NSH (0x800003d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_NSH (0x800003e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_NSH (0x800003f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_ONC_NSH (0x8000041)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_ONC_NSH (0x8000042)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_ONC_NSH (0x8000043)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_NC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_NC_NSH (0x8000044)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_ONC_NSH (0x8000045)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_ONC_NSH (0x8000046)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_ONC_NSH (0x8000047)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_ONC_NSH (0x8000048)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_ONC_NSH (0x8000049)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_ONC_NSH (0x800004a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_ONC_NSH (0x800004b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_ONC_NSH (0x800004c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_ONC_NSH (0x800004d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_ONC_NSH (0x800004e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_ONC_NSH (0x800004f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_NSH (0x8000051)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_NSH (0x8000052)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_NSH (0x8000053)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_WA_NSH (0x8000054)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWB_WA_NSH (0x8000055)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_NSH (0x8000056)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_NSH (0x8000057)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_WA_NSH (0x8000058)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_WA_NSH (0x8000059)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_WA_NSH (0x800005a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_NSH (0x800005b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_WA_NSH (0x800005c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_WA_NSH (0x800005d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_WA_NSH (0x800005e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_NSH (0x800005f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_NSH (0x8000061)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_NSH (0x8000062)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_NSH (0x8000063)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_RA_NSH (0x8000064)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_NSH (0x8000065)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWB_RA_NSH (0x8000066)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_NSH (0x8000067)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_RA_NSH (0x8000068)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_RA_NSH (0x8000069)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_RA_NSH (0x800006a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_NSH (0x800006b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_RA_NSH (0x800006c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_RA_NSH (0x800006d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_RA_NSH (0x800006e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_NSH (0x800006f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_NSH (0x8000071)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_NSH (0x8000072)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_NSH (0x8000073)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_RWA_NSH (0x8000074)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_NSH (0x8000075)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_NSH (0x8000076)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWB_RWA_NSH (0x8000077)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_NSH (0x8000078)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_NSH (0x8000079)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_NSH (0x800007a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_NSH (0x800007b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_NSH (0x800007c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_NSH (0x800007d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_NSH (0x800007e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_NSH (0x800007f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_NA_NSH (0x8000081)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_NA_NSH (0x8000082)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_NSH (0x8000083)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWT_NA_NSH (0x8000084)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_NA_NSH (0x8000085)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_NA_NSH (0x8000086)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_NSH (0x8000087)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WT_NA_NSH (0x8000088)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_NA_NSH (0x8000089)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_NA_NSH (0x800008a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_NA_NSH (0x800008b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_NA_NSH (0x800008c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_NA_NSH (0x800008d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_NA_NSH (0x800008e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_NA_NSH (0x800008f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_WA_NSH (0x8000091)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_WA_NSH (0x8000092)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_NSH (0x8000093)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWT_WA_NSH (0x8000094)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_WA_NSH (0x8000095)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_WA_NSH (0x8000096)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_NSH (0x8000097)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_WA_NSH (0x8000098)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WT_WA_NSH (0x8000099)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_WA_NSH (0x800009a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_WA_NSH (0x800009b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_WA_NSH (0x800009c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_WA_NSH (0x800009d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_WA_NSH (0x800009e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_WA_NSH (0x800009f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_RA_NSH (0x80000a1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_RA_NSH (0x80000a2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_NSH (0x80000a3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWT_RA_NSH (0x80000a4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_RA_NSH (0x80000a5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_RA_NSH (0x80000a6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_NSH (0x80000a7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_RA_NSH (0x80000a8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_RA_NSH (0x80000a9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WT_RA_NSH (0x80000aa)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_RA_NSH (0x80000ab)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_RA_NSH (0x80000ac)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_RA_NSH (0x80000ad)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_RA_NSH (0x80000ae)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_RA_NSH (0x80000af)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_NSH (0x80000b1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_NSH (0x80000b2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_NSH (0x80000b3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWT_RWA_NSH (0x80000b4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_NSH (0x80000b5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_NSH (0x80000b6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_NSH (0x80000b7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_RWA_NSH (0x80000b8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_RWA_NSH (0x80000b9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_RWA_NSH (0x80000ba)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WT_RWA_NSH (0x80000bb)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_RWA_NSH (0x80000bc)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_RWA_NSH (0x80000bd)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_RWA_NSH (0x80000be)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_NSH (0x80000bf)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_NA_NSH (0x80000c1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_NA_NSH (0x80000c2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_NSH (0x80000c3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWB_NA_NSH (0x80000c4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_NA_NSH (0x80000c5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_NA_NSH (0x80000c6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_NSH (0x80000c7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_NA_NSH (0x80000c8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_NA_NSH (0x80000c9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_NA_NSH (0x80000ca)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_NA_NSH (0x80000cb)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WB_NA_NSH (0x80000cc)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_NA_NSH (0x80000cd)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_NA_NSH (0x80000ce)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_NA_NSH (0x80000cf)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_WA_NSH (0x80000d1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_WA_NSH (0x80000d2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_NSH (0x80000d3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWB_WA_NSH (0x80000d4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_WA_NSH (0x80000d5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_WA_NSH (0x80000d6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_NSH (0x80000d7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_WA_NSH (0x80000d8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_WA_NSH (0x80000d9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_WA_NSH (0x80000da)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_WA_NSH (0x80000db)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_WA_NSH (0x80000dc)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WB_WA_NSH (0x80000dd)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_WA_NSH (0x80000de)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_WA_NSH (0x80000df)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_RA_NSH (0x80000e1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_RA_NSH (0x80000e2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_NSH (0x80000e3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWB_RA_NSH (0x80000e4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_RA_NSH (0x80000e5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_RA_NSH (0x80000e6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_NSH (0x80000e7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_RA_NSH (0x80000e8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_RA_NSH (0x80000e9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_RA_NSH (0x80000ea)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_RA_NSH (0x80000eb)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_RA_NSH (0x80000ec)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_RA_NSH (0x80000ed)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WB_RA_NSH (0x80000ee)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_RA_NSH (0x80000ef)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_NSH (0x80000f1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_NSH (0x80000f2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_NSH (0x80000f3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWB_RWA_NSH (0x80000f4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_NSH (0x80000f5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_NSH (0x80000f6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_NSH (0x80000f7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_RWA_NSH (0x80000f8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_RWA_NSH (0x80000f9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_RWA_NSH (0x80000fa)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_NSH (0x80000fb)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_RWA_NSH (0x80000fc)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_RWA_NSH (0x80000fd)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_RWA_NSH (0x80000fe)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WB_RWA_NSH (0x80000ff)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWT_WA_OSH (0x8000211)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_OSH (0x8000212)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_OSH (0x8000213)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_WA_OSH (0x8000214)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_OSH (0x8000215)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_OSH (0x8000216)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_OSH (0x8000217)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_WA_OSH (0x8000218)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_WA_OSH (0x8000219)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_WA_OSH (0x800021a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_OSH (0x800021b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_WA_OSH (0x800021c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_WA_OSH (0x800021d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_WA_OSH (0x800021e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_OSH (0x800021f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_OSH (0x8000221)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWT_RA_OSH (0x8000222)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_OSH (0x8000223)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_RA_OSH (0x8000224)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_OSH (0x8000225)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_OSH (0x8000226)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_OSH (0x8000227)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_RA_OSH (0x8000228)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_RA_OSH (0x8000229)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_RA_OSH (0x800022a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_OSH (0x800022b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_RA_OSH (0x800022c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_RA_OSH (0x800022d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_RA_OSH (0x800022e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_OSH (0x800022f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_OSH (0x8000231)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_OSH (0x8000232)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWT_RWA_OSH (0x8000233)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_RWA_OSH (0x8000234)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_OSH (0x8000235)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_OSH (0x8000236)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_OSH (0x8000237)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_OSH (0x8000238)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_OSH (0x8000239)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_OSH (0x800023a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_OSH (0x800023b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_OSH (0x800023c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_OSH (0x800023d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_OSH (0x800023e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_OSH (0x800023f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_ONC_OSH (0x8000241)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_ONC_OSH (0x8000242)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_ONC_OSH (0x8000243)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_NC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_NC_OSH (0x8000244)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_ONC_OSH (0x8000245)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_ONC_OSH (0x8000246)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_ONC_OSH (0x8000247)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_ONC_OSH (0x8000248)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_ONC_OSH (0x8000249)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_ONC_OSH (0x800024a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_ONC_OSH (0x800024b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_ONC_OSH (0x800024c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_ONC_OSH (0x800024d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_ONC_OSH (0x800024e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_ONC_OSH (0x800024f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_OSH (0x8000251)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_OSH (0x8000252)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_OSH (0x8000253)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_WA_OSH (0x8000254)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWB_WA_OSH (0x8000255)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_OSH (0x8000256)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_OSH (0x8000257)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_WA_OSH (0x8000258)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_WA_OSH (0x8000259)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_WA_OSH (0x800025a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_OSH (0x800025b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_WA_OSH (0x800025c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_WA_OSH (0x800025d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_WA_OSH (0x800025e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_OSH (0x800025f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_OSH (0x8000261)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_OSH (0x8000262)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_OSH (0x8000263)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_RA_OSH (0x8000264)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_OSH (0x8000265)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWB_RA_OSH (0x8000266)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_OSH (0x8000267)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_RA_OSH (0x8000268)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_RA_OSH (0x8000269)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_RA_OSH (0x800026a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_OSH (0x800026b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_RA_OSH (0x800026c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_RA_OSH (0x800026d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_RA_OSH (0x800026e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_OSH (0x800026f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_OSH (0x8000271)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_OSH (0x8000272)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_OSH (0x8000273)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_RWA_OSH (0x8000274)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_OSH (0x8000275)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_OSH (0x8000276)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWB_RWA_OSH (0x8000277)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_OSH (0x8000278)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_OSH (0x8000279)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_OSH (0x800027a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_OSH (0x800027b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_OSH (0x800027c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_OSH (0x800027d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_OSH (0x800027e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_OSH (0x800027f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_NA_OSH (0x8000281)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_NA_OSH (0x8000282)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_OSH (0x8000283)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWT_NA_OSH (0x8000284)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_NA_OSH (0x8000285)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_NA_OSH (0x8000286)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_OSH (0x8000287)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WT_NA_OSH (0x8000288)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_NA_OSH (0x8000289)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_NA_OSH (0x800028a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_NA_OSH (0x800028b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_NA_OSH (0x800028c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_NA_OSH (0x800028d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_NA_OSH (0x800028e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_NA_OSH (0x800028f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_WA_OSH (0x8000291)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_WA_OSH (0x8000292)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_OSH (0x8000293)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWT_WA_OSH (0x8000294)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_WA_OSH (0x8000295)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_WA_OSH (0x8000296)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_OSH (0x8000297)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_WA_OSH (0x8000298)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WT_WA_OSH (0x8000299)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_WA_OSH (0x800029a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_WA_OSH (0x800029b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_WA_OSH (0x800029c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_WA_OSH (0x800029d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_WA_OSH (0x800029e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_WA_OSH (0x800029f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_RA_OSH (0x80002a1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_RA_OSH (0x80002a2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_OSH (0x80002a3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWT_RA_OSH (0x80002a4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_RA_OSH (0x80002a5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_RA_OSH (0x80002a6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_OSH (0x80002a7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_RA_OSH (0x80002a8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_RA_OSH (0x80002a9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WT_RA_OSH (0x80002aa)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_RA_OSH (0x80002ab)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_RA_OSH (0x80002ac)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_RA_OSH (0x80002ad)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_RA_OSH (0x80002ae)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_RA_OSH (0x80002af)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_OSH (0x80002b1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_OSH (0x80002b2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_OSH (0x80002b3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWT_RWA_OSH (0x80002b4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_OSH (0x80002b5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_OSH (0x80002b6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_OSH (0x80002b7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_RWA_OSH (0x80002b8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_RWA_OSH (0x80002b9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_RWA_OSH (0x80002ba)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WT_RWA_OSH (0x80002bb)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_RWA_OSH (0x80002bc)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_RWA_OSH (0x80002bd)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_RWA_OSH (0x80002be)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_OSH (0x80002bf)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_NA_OSH (0x80002c1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_NA_OSH (0x80002c2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_OSH (0x80002c3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWB_NA_OSH (0x80002c4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_NA_OSH (0x80002c5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_NA_OSH (0x80002c6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_OSH (0x80002c7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_NA_OSH (0x80002c8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_NA_OSH (0x80002c9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_NA_OSH (0x80002ca)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_NA_OSH (0x80002cb)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WB_NA_OSH (0x80002cc)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_NA_OSH (0x80002cd)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_NA_OSH (0x80002ce)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_NA_OSH (0x80002cf)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_WA_OSH (0x80002d1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_WA_OSH (0x80002d2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_OSH (0x80002d3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWB_WA_OSH (0x80002d4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_WA_OSH (0x80002d5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_WA_OSH (0x80002d6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_OSH (0x80002d7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_WA_OSH (0x80002d8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_WA_OSH (0x80002d9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_WA_OSH (0x80002da)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_WA_OSH (0x80002db)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_WA_OSH (0x80002dc)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WB_WA_OSH (0x80002dd)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_WA_OSH (0x80002de)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_WA_OSH (0x80002df)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_RA_OSH (0x80002e1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_RA_OSH (0x80002e2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_OSH (0x80002e3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWB_RA_OSH (0x80002e4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_RA_OSH (0x80002e5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_RA_OSH (0x80002e6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_OSH (0x80002e7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_RA_OSH (0x80002e8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_RA_OSH (0x80002e9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_RA_OSH (0x80002ea)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_RA_OSH (0x80002eb)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_RA_OSH (0x80002ec)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_RA_OSH (0x80002ed)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WB_RA_OSH (0x80002ee)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_RA_OSH (0x80002ef)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_OSH (0x80002f1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_OSH (0x80002f2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_OSH (0x80002f3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWB_RWA_OSH (0x80002f4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_OSH (0x80002f5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_OSH (0x80002f6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_OSH (0x80002f7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_RWA_OSH (0x80002f8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_RWA_OSH (0x80002f9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_RWA_OSH (0x80002fa)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_OSH (0x80002fb)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_RWA_OSH (0x80002fc)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_RWA_OSH (0x80002fd)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_RWA_OSH (0x80002fe)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WB_RWA_OSH (0x80002ff)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWT_WA_ISH (0x8000311)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_ISH (0x8000312)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_ISH (0x8000313)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_WA_ISH (0x8000314)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_ISH (0x8000315)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_ISH (0x8000316)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_ISH (0x8000317)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_WA_ISH (0x8000318)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_WA_ISH (0x8000319)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_WA_ISH (0x800031a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_ISH (0x800031b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_WA_ISH (0x800031c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_WA_ISH (0x800031d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_WA_ISH (0x800031e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_ISH (0x800031f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_ISH (0x8000321)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWT_RA_ISH (0x8000322)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_ISH (0x8000323)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_RA_ISH (0x8000324)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_ISH (0x8000325)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_ISH (0x8000326)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_ISH (0x8000327)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_RA_ISH (0x8000328)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_RA_ISH (0x8000329)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_RA_ISH (0x800032a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_ISH (0x800032b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_RA_ISH (0x800032c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_RA_ISH (0x800032d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_RA_ISH (0x800032e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_ISH (0x800032f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_ISH (0x8000331)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_ISH (0x8000332)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWT_RWA_ISH (0x8000333)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_RWA_ISH (0x8000334)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_ISH (0x8000335)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_ISH (0x8000336)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_ISH (0x8000337)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_ISH (0x8000338)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_ISH (0x8000339)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_ISH (0x800033a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_ISH (0x800033b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_ISH (0x800033c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_ISH (0x800033d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_ISH (0x800033e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_ISH (0x800033f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_ONC_ISH (0x8000341)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_ONC_ISH (0x8000342)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_ONC_ISH (0x8000343)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_NC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_NC_ISH (0x8000344)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_ONC_ISH (0x8000345)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_ONC_ISH (0x8000346)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_ONC_ISH (0x8000347)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_ONC_ISH (0x8000348)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_ONC_ISH (0x8000349)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_ONC_ISH (0x800034a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_ONC_ISH (0x800034b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_ONC_ISH (0x800034c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_ONC_ISH (0x800034d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_ONC_ISH (0x800034e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_ONC_ISH (0x800034f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_ISH (0x8000351)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_ISH (0x8000352)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_ISH (0x8000353)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_WA_ISH (0x8000354)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWB_WA_ISH (0x8000355)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_ISH (0x8000356)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_ISH (0x8000357)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_WA_ISH (0x8000358)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_WA_ISH (0x8000359)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_WA_ISH (0x800035a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_ISH (0x800035b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_WA_ISH (0x800035c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_WA_ISH (0x800035d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_WA_ISH (0x800035e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_ISH (0x800035f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_ISH (0x8000361)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_ISH (0x8000362)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_ISH (0x8000363)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_RA_ISH (0x8000364)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_ISH (0x8000365)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWB_RA_ISH (0x8000366)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_ISH (0x8000367)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_RA_ISH (0x8000368)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_RA_ISH (0x8000369)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_RA_ISH (0x800036a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_ISH (0x800036b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_RA_ISH (0x800036c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_RA_ISH (0x800036d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_RA_ISH (0x800036e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_ISH (0x800036f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_ISH (0x8000371)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_ISH (0x8000372)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_ISH (0x8000373)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_RWA_ISH (0x8000374)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_ISH (0x8000375)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_ISH (0x8000376)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWB_RWA_ISH (0x8000377)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_ISH (0x8000378)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_ISH (0x8000379)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_ISH (0x800037a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_ISH (0x800037b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_ISH (0x800037c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_ISH (0x800037d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_ISH (0x800037e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_ISH (0x800037f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_NA_ISH (0x8000381)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_NA_ISH (0x8000382)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_ISH (0x8000383)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWT_NA_ISH (0x8000384)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_NA_ISH (0x8000385)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_NA_ISH (0x8000386)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_ISH (0x8000387)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WT_NA_ISH (0x8000388)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_NA_ISH (0x8000389)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_NA_ISH (0x800038a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_NA_ISH (0x800038b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_NA_ISH (0x800038c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_NA_ISH (0x800038d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_NA_ISH (0x800038e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_NA_ISH (0x800038f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_WA_ISH (0x8000391)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_WA_ISH (0x8000392)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_ISH (0x8000393)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWT_WA_ISH (0x8000394)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_WA_ISH (0x8000395)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_WA_ISH (0x8000396)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_ISH (0x8000397)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_WA_ISH (0x8000398)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WT_WA_ISH (0x8000399)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_WA_ISH (0x800039a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_WA_ISH (0x800039b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_WA_ISH (0x800039c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_WA_ISH (0x800039d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_WA_ISH (0x800039e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_WA_ISH (0x800039f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_RA_ISH (0x80003a1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_RA_ISH (0x80003a2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_ISH (0x80003a3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWT_RA_ISH (0x80003a4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_RA_ISH (0x80003a5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_RA_ISH (0x80003a6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_ISH (0x80003a7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_RA_ISH (0x80003a8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_RA_ISH (0x80003a9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WT_RA_ISH (0x80003aa)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_RA_ISH (0x80003ab)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_RA_ISH (0x80003ac)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_RA_ISH (0x80003ad)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_RA_ISH (0x80003ae)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_RA_ISH (0x80003af)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_ISH (0x80003b1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_ISH (0x80003b2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_ISH (0x80003b3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWT_RWA_ISH (0x80003b4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_ISH (0x80003b5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_ISH (0x80003b6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_ISH (0x80003b7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_RWA_ISH (0x80003b8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_RWA_ISH (0x80003b9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_RWA_ISH (0x80003ba)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WT_RWA_ISH (0x80003bb)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_RWA_ISH (0x80003bc)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_RWA_ISH (0x80003bd)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_RWA_ISH (0x80003be)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_ISH (0x80003bf)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_NA_ISH (0x80003c1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_NA_ISH (0x80003c2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_ISH (0x80003c3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWB_NA_ISH (0x80003c4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_NA_ISH (0x80003c5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_NA_ISH (0x80003c6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_ISH (0x80003c7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_NA_ISH (0x80003c8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_NA_ISH (0x80003c9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_NA_ISH (0x80003ca)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_NA_ISH (0x80003cb)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WB_NA_ISH (0x80003cc)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_NA_ISH (0x80003cd)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_NA_ISH (0x80003ce)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_NA_ISH (0x80003cf)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_WA_ISH (0x80003d1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_WA_ISH (0x80003d2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_ISH (0x80003d3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWB_WA_ISH (0x80003d4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_WA_ISH (0x80003d5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_WA_ISH (0x80003d6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_ISH (0x80003d7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_WA_ISH (0x80003d8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_WA_ISH (0x80003d9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_WA_ISH (0x80003da)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_WA_ISH (0x80003db)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_WA_ISH (0x80003dc)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WB_WA_ISH (0x80003dd)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_WA_ISH (0x80003de)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_WA_ISH (0x80003df)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_RA_ISH (0x80003e1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_RA_ISH (0x80003e2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_ISH (0x80003e3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWB_RA_ISH (0x80003e4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_RA_ISH (0x80003e5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_RA_ISH (0x80003e6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_ISH (0x80003e7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_RA_ISH (0x80003e8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_RA_ISH (0x80003e9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_RA_ISH (0x80003ea)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_RA_ISH (0x80003eb)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_RA_ISH (0x80003ec)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_RA_ISH (0x80003ed)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WB_RA_ISH (0x80003ee)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_RA_ISH (0x80003ef)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_ISH (0x80003f1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_ISH (0x80003f2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_ISH (0x80003f3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWB_RWA_ISH (0x80003f4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_ISH (0x80003f5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_ISH (0x80003f6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_ISH (0x80003f7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_RWA_ISH (0x80003f8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_RWA_ISH (0x80003f9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_RWA_ISH (0x80003fa)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_ISH (0x80003fb)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_RWA_ISH (0x80003fc)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_RWA_ISH (0x80003fd)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_RWA_ISH (0x80003fe)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WB_RWA_ISH (0x80003ff)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_MAX) */
+#define OKL4_ASM_PAGE_CACHE_MAX (0x80003ff)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_INVALID) */
+#define OKL4_ASM_PAGE_CACHE_INVALID (0xffffffff)
+
+/**
+ *  okl4_cpu_exec_mode
+ **/
+/*lint -esym(621, OKL4_ARM_MODE) */
+#define OKL4_ARM_MODE (0)
+
+/*lint -esym(621, OKL4_DEFAULT_MODE) */
+#define OKL4_DEFAULT_MODE (4)
+
+/*lint -esym(621, OKL4_JAZELLE_MODE) */
+#define OKL4_JAZELLE_MODE (2)
+
+/*lint -esym(621, OKL4_THUMBEE_MODE) */
+#define OKL4_THUMBEE_MODE (3)
+
+/*lint -esym(621, OKL4_THUMB_MODE) */
+#define OKL4_THUMB_MODE (1)
+
+/**
+ *  okl4_cpu_mode_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_EXEC_MODE_CPU_MODE) */
+#define OKL4_ASM_MASK_EXEC_MODE_CPU_MODE (7)
+/*lint -esym(621, OKL4_ASM_SHIFT_EXEC_MODE_CPU_MODE) */
+#define OKL4_ASM_SHIFT_EXEC_MODE_CPU_MODE (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_EXEC_MODE_CPU_MODE) */
+#define OKL4_ASM_WIDTH_EXEC_MODE_CPU_MODE (3)
+/*lint -esym(621, OKL4_ASM_MASK_ENDIAN_CPU_MODE) */
+#define OKL4_ASM_MASK_ENDIAN_CPU_MODE (1 << 7)
+/*lint -esym(621, OKL4_ASM_SHIFT_ENDIAN_CPU_MODE) */
+#define OKL4_ASM_SHIFT_ENDIAN_CPU_MODE (7)
+/*lint -esym(621, OKL4_ASM_WIDTH_ENDIAN_CPU_MODE) */
+#define OKL4_ASM_WIDTH_ENDIAN_CPU_MODE (1)
+
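+/*
+ * Illustrative sketch (editor's note, not generated output): the
+ * MASK/SHIFT/WIDTH triplets above describe how the execution-mode and
+ * endianness fields are packed into a CPU-mode word.  Assuming the
+ * macros are visible to C code, a value could be composed and decoded
+ * roughly as follows; the variable names are purely illustrative.
+ *
+ *   uint32_t mode = 0;
+ *   mode |= ((uint32_t)OKL4_THUMB_MODE << OKL4_ASM_SHIFT_EXEC_MODE_CPU_MODE)
+ *           & OKL4_ASM_MASK_EXEC_MODE_CPU_MODE;
+ *   mode |= (1u << OKL4_ASM_SHIFT_ENDIAN_CPU_MODE);  // set the endianness flag
+ *   uint32_t exec = (mode & OKL4_ASM_MASK_EXEC_MODE_CPU_MODE)
+ *                   >> OKL4_ASM_SHIFT_EXEC_MODE_CPU_MODE;
+ */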
+
+/**
+ *  okl4_page_perms_t
+ **/
+/*lint -esym(621, OKL4_ASM_PAGE_PERMS_NONE) */
+#define OKL4_ASM_PAGE_PERMS_NONE (0x0)
+/*lint -esym(621, OKL4_ASM_PAGE_PERMS_X) */
+#define OKL4_ASM_PAGE_PERMS_X (0x1)
+/*lint -esym(621, OKL4_ASM_PAGE_PERMS_W) */
+#define OKL4_ASM_PAGE_PERMS_W (0x2)
+/*lint -esym(621, OKL4_ASM_PAGE_PERMS_WX) */
+#define OKL4_ASM_PAGE_PERMS_WX (0x3)
+/*lint -esym(621, OKL4_ASM_PAGE_PERMS_R) */
+#define OKL4_ASM_PAGE_PERMS_R (0x4)
+/*lint -esym(621, OKL4_ASM_PAGE_PERMS_RX) */
+#define OKL4_ASM_PAGE_PERMS_RX (0x5)
+/*lint -esym(621, OKL4_ASM_PAGE_PERMS_RW) */
+#define OKL4_ASM_PAGE_PERMS_RW (0x6)
+/*lint -esym(621, OKL4_ASM_PAGE_PERMS_RWX) */
+#define OKL4_ASM_PAGE_PERMS_RWX (0x7)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_PAGE_PERMS_MAX) */
+#define OKL4_ASM_PAGE_PERMS_MAX (0x7)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_PAGE_PERMS_INVALID) */
+#define OKL4_ASM_PAGE_PERMS_INVALID (0xffffffff)
+
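+/*
+ * Note (editor's illustration, not generated output): these permission
+ * values form a 3-bit mask, so the combined encodings are ORs of the
+ * single-permission ones, e.g.
+ *
+ *   OKL4_ASM_PAGE_PERMS_RX  == (OKL4_ASM_PAGE_PERMS_R | OKL4_ASM_PAGE_PERMS_X)
+ *   OKL4_ASM_PAGE_PERMS_RWX == (OKL4_ASM_PAGE_PERMS_R |
+ *                               OKL4_ASM_PAGE_PERMS_W |
+ *                               OKL4_ASM_PAGE_PERMS_X)
+ */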
+/**
+ *  okl4_error_t
+ **/
+/**
+    KSP returned OK
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_OK) */
+#define OKL4_ASM_ERROR_KSP_OK (0x0)
+/**
+    The operation succeeded
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_OK) */
+#define OKL4_ASM_ERROR_OK (0x0)
+/**
+    The target vCPU was already running.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_ALREADY_STARTED) */
+#define OKL4_ASM_ERROR_ALREADY_STARTED (0x1)
+/**
+    The target vCPU was not running.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_ALREADY_STOPPED) */
+#define OKL4_ASM_ERROR_ALREADY_STOPPED (0x2)
+/*lint -esym(621, OKL4_ASM_ERROR_AXON_AREA_TOO_BIG) */
+#define OKL4_ASM_ERROR_AXON_AREA_TOO_BIG (0x3)
+/*lint -esym(621, OKL4_ASM_ERROR_AXON_BAD_MESSAGE_SIZE) */
+#define OKL4_ASM_ERROR_AXON_BAD_MESSAGE_SIZE (0x4)
+/*lint -esym(621, OKL4_ASM_ERROR_AXON_INVALID_OFFSET) */
+#define OKL4_ASM_ERROR_AXON_INVALID_OFFSET (0x5)
+/*lint -esym(621, OKL4_ASM_ERROR_AXON_QUEUE_NOT_MAPPED) */
+#define OKL4_ASM_ERROR_AXON_QUEUE_NOT_MAPPED (0x6)
+/*lint -esym(621, OKL4_ASM_ERROR_AXON_QUEUE_NOT_READY) */
+#define OKL4_ASM_ERROR_AXON_QUEUE_NOT_READY (0x7)
+/*lint -esym(621, OKL4_ASM_ERROR_AXON_TRANSFER_LIMIT_EXCEEDED) */
+#define OKL4_ASM_ERROR_AXON_TRANSFER_LIMIT_EXCEEDED (0x8)
+/**
+    A blocking operation was cancelled because the operation was aborted.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_CANCELLED) */
+#define OKL4_ASM_ERROR_CANCELLED (0x9)
+/**
+    The operation failed due to an existing mapping.  Mapping
+    operations must not overlap an existing mapping.  Unmapping
+    must be performed at the same size as the original mapping.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_EXISTING_MAPPING) */
+#define OKL4_ASM_ERROR_EXISTING_MAPPING (0xa)
+/**
+    The operation requested with a segment failed due to
+    insufficient rights in the segment.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_INSUFFICIENT_SEGMENT_RIGHTS) */
+#define OKL4_ASM_ERROR_INSUFFICIENT_SEGMENT_RIGHTS (0xb)
+/**
+    The operation did not complete because it was interrupted by a
+    preemption.  This error value is only used internally.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_INTERRUPTED) */
+#define OKL4_ASM_ERROR_INTERRUPTED (0xc)
+/**
+    Attempt to attach an interrupt to an IRQ number when the
+    interrupt is already attached to an IRQ number
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_INTERRUPT_ALREADY_ATTACHED) */
+#define OKL4_ASM_ERROR_INTERRUPT_ALREADY_ATTACHED (0xd)
+/**
+    Attempt to use an IRQ number that is out of range, of
+    the wrong type, or not in the correct state
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_INTERRUPT_INVALID_IRQ) */
+#define OKL4_ASM_ERROR_INTERRUPT_INVALID_IRQ (0xe)
+/**
+    Attempt to operate on an unknown IRQ number
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_INTERRUPT_NOT_ATTACHED) */
+#define OKL4_ASM_ERROR_INTERRUPT_NOT_ATTACHED (0xf)
+/**
+    An invalid argument was provided.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_INVALID_ARGUMENT) */
+#define OKL4_ASM_ERROR_INVALID_ARGUMENT (0x10)
+/**
+    The operation failed because one of the arguments does not refer to a
+    valid object.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_INVALID_DESIGNATOR) */
+#define OKL4_ASM_ERROR_INVALID_DESIGNATOR (0x11)
+/**
+    The operation failed because the power_state
+    argument is invalid.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_INVALID_POWER_STATE) */
+#define OKL4_ASM_ERROR_INVALID_POWER_STATE (0x12)
+/**
+    The operation failed because the given segment index does
+    not correspond to an attached physical segment.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_INVALID_SEGMENT_INDEX) */
+#define OKL4_ASM_ERROR_INVALID_SEGMENT_INDEX (0x13)
+/**
+    A user-provided address produced a read or write fault during the operation.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_MEMORY_FAULT) */
+#define OKL4_ASM_ERROR_MEMORY_FAULT (0x14)
+/**
+    The operation failed because there is no mapping at the
+    specified location.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_MISSING_MAPPING) */
+#define OKL4_ASM_ERROR_MISSING_MAPPING (0x15)
+/**
+    The delete operation failed because the KMMU context is not
+    empty.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_NON_EMPTY_MMU_CONTEXT) */
+#define OKL4_ASM_ERROR_NON_EMPTY_MMU_CONTEXT (0x16)
+/**
+    The lookup operation failed because the given virtual address
+    in the given KMMU context is not mapped at the given physical
+    segment.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_NOT_IN_SEGMENT) */
+#define OKL4_ASM_ERROR_NOT_IN_SEGMENT (0x17)
+/**
+    The operation failed because the caller is not on the last
+    online CPU.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_NOT_LAST_CPU) */
+#define OKL4_ASM_ERROR_NOT_LAST_CPU (0x18)
+/**
+    Insufficient resources are available to perform the operation.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_NO_RESOURCES) */
+#define OKL4_ASM_ERROR_NO_RESOURCES (0x19)
+/**
+    Operation failed because pipe was not in the required state.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_PIPE_BAD_STATE) */
+#define OKL4_ASM_ERROR_PIPE_BAD_STATE (0x1a)
+/**
+    Operation failed because no messages are in the queue.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_PIPE_EMPTY) */
+#define OKL4_ASM_ERROR_PIPE_EMPTY (0x1b)
+/**
+    Operation failed because no memory is available in the queue.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_PIPE_FULL) */
+#define OKL4_ASM_ERROR_PIPE_FULL (0x1c)
+/**
+    Operation failed because the pipe is in reset or not ready.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_PIPE_NOT_READY) */
+#define OKL4_ASM_ERROR_PIPE_NOT_READY (0x1d)
+/**
+    Message was truncated because the receive buffer is too small.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_PIPE_RECV_OVERFLOW) */
+#define OKL4_ASM_ERROR_PIPE_RECV_OVERFLOW (0x1e)
+/**
+    The operation failed because at least one VCPU has a monitored
+    power state and is not currently suspended.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_POWER_VCPU_RESUMED) */
+#define OKL4_ASM_ERROR_POWER_VCPU_RESUMED (0x1f)
+/**
+    The operation requires a segment to be unused, or not attached
+    to an MMU context.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_SEGMENT_USED) */
+#define OKL4_ASM_ERROR_SEGMENT_USED (0x20)
+/*lint -esym(621, OKL4_ASM_ERROR_THREAD_ALREADY_WATCHING_SUSPENDED) */
+#define OKL4_ASM_ERROR_THREAD_ALREADY_WATCHING_SUSPENDED (0x21)
+/**
+    The timer is already active, and was not reprogrammed.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_TIMER_ACTIVE) */
+#define OKL4_ASM_ERROR_TIMER_ACTIVE (0x22)
+/**
+    The timer has already been cancelled or expired.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_TIMER_CANCELLED) */
+#define OKL4_ASM_ERROR_TIMER_CANCELLED (0x23)
+/**
+    Operation failed due to a temporary condition, and may be retried.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_TRY_AGAIN) */
+#define OKL4_ASM_ERROR_TRY_AGAIN (0x24)
+/**
+    The non-blocking operation failed because it would
+    block on a resource.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_WOULD_BLOCK) */
+#define OKL4_ASM_ERROR_WOULD_BLOCK (0x25)
+/**
+    Insufficient resources
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_ALLOC_EXHAUSTED) */
+#define OKL4_ASM_ERROR_ALLOC_EXHAUSTED (0x26)
+/**
+    KSP specific error 0
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_ERROR_0) */
+#define OKL4_ASM_ERROR_KSP_ERROR_0 (0x10000010)
+/**
+    KSP specific error 1
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_ERROR_1) */
+#define OKL4_ASM_ERROR_KSP_ERROR_1 (0x10000011)
+/**
+    KSP specific error 2
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_ERROR_2) */
+#define OKL4_ASM_ERROR_KSP_ERROR_2 (0x10000012)
+/**
+    KSP specific error 3
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_ERROR_3) */
+#define OKL4_ASM_ERROR_KSP_ERROR_3 (0x10000013)
+/**
+    KSP specific error 4
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_ERROR_4) */
+#define OKL4_ASM_ERROR_KSP_ERROR_4 (0x10000014)
+/**
+    KSP specific error 5
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_ERROR_5) */
+#define OKL4_ASM_ERROR_KSP_ERROR_5 (0x10000015)
+/**
+    KSP specific error 6
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_ERROR_6) */
+#define OKL4_ASM_ERROR_KSP_ERROR_6 (0x10000016)
+/**
+    KSP specific error 7
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_ERROR_7) */
+#define OKL4_ASM_ERROR_KSP_ERROR_7 (0x10000017)
+/**
+    Invalid argument to KSP
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_INVALID_ARG) */
+#define OKL4_ASM_ERROR_KSP_INVALID_ARG (0x80000001)
+/**
+    KSP doesn't implement requested feature
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_NOT_IMPLEMENTED) */
+#define OKL4_ASM_ERROR_KSP_NOT_IMPLEMENTED (0x80000002)
+/**
+    User didn't supply rights for requested feature
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_INSUFFICIENT_RIGHTS) */
+#define OKL4_ASM_ERROR_KSP_INSUFFICIENT_RIGHTS (0x80000003)
+/**
+    Interrupt already registered
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_INTERRUPT_REGISTERED) */
+#define OKL4_ASM_ERROR_KSP_INTERRUPT_REGISTERED (0x80000004)
+/**
+    Requested operation is not implemented.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_NOT_IMPLEMENTED) */
+#define OKL4_ASM_ERROR_NOT_IMPLEMENTED (0xffffffff)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_MAX) */
+#define OKL4_ASM_ERROR_MAX (0xffffffff)
+
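+/*
+ * Usage sketch (editor's illustration; assumes these OKL4_ASM_ERROR_*
+ * values mirror the C-level okl4_error_t codes).  A caller might retry
+ * on the transient error and treat everything except OK as failure; the
+ * do_pipe_send() helper below is hypothetical:
+ *
+ *   long err;
+ *   do {
+ *       err = do_pipe_send();
+ *   } while (err == OKL4_ASM_ERROR_TRY_AGAIN);
+ *   if (err != OKL4_ASM_ERROR_OK) {
+ *       // handle the failure
+ *   }
+ */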
+/**
+ *  okl4_gicd_icfgr_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_EDGE_GICD_ICFGR) */
+#define OKL4_ASM_MASK_EDGE_GICD_ICFGR (1 << 1)
+/*lint -esym(621, OKL4_ASM_SHIFT_EDGE_GICD_ICFGR) */
+#define OKL4_ASM_SHIFT_EDGE_GICD_ICFGR (1)
+/*lint -esym(621, OKL4_ASM_WIDTH_EDGE_GICD_ICFGR) */
+#define OKL4_ASM_WIDTH_EDGE_GICD_ICFGR (1)
+
+
+/**
+ *  okl4_sgi_target_t
+ **/
+/*lint -esym(621, OKL4_ASM_SGI_TARGET_LISTED) */
+#define OKL4_ASM_SGI_TARGET_LISTED (0x0)
+/*lint -esym(621, OKL4_ASM_SGI_TARGET_ALL_OTHERS) */
+#define OKL4_ASM_SGI_TARGET_ALL_OTHERS (0x1)
+/*lint -esym(621, OKL4_ASM_SGI_TARGET_SELF) */
+#define OKL4_ASM_SGI_TARGET_SELF (0x2)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_SGI_TARGET_MAX) */
+#define OKL4_ASM_SGI_TARGET_MAX (0x2)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_SGI_TARGET_INVALID) */
+#define OKL4_ASM_SGI_TARGET_INVALID (0xffffffff)
+
+/**
+ *  okl4_gicd_sgir_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_SGIINTID_GICD_SGIR) */
+#define OKL4_ASM_MASK_SGIINTID_GICD_SGIR (15)
+/*lint -esym(621, OKL4_ASM_SHIFT_SGIINTID_GICD_SGIR) */
+#define OKL4_ASM_SHIFT_SGIINTID_GICD_SGIR (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_SGIINTID_GICD_SGIR) */
+#define OKL4_ASM_WIDTH_SGIINTID_GICD_SGIR (4)
+/*lint -esym(621, OKL4_ASM_MASK_NSATT_GICD_SGIR) */
+#define OKL4_ASM_MASK_NSATT_GICD_SGIR (1 << 15)
+/*lint -esym(621, OKL4_ASM_SHIFT_NSATT_GICD_SGIR) */
+#define OKL4_ASM_SHIFT_NSATT_GICD_SGIR (15)
+/*lint -esym(621, OKL4_ASM_WIDTH_NSATT_GICD_SGIR) */
+#define OKL4_ASM_WIDTH_NSATT_GICD_SGIR (1)
+/*lint -esym(621, OKL4_ASM_MASK_CPUTARGETLIST_GICD_SGIR) */
+#define OKL4_ASM_MASK_CPUTARGETLIST_GICD_SGIR (255 << 16)
+/*lint -esym(621, OKL4_ASM_SHIFT_CPUTARGETLIST_GICD_SGIR) */
+#define OKL4_ASM_SHIFT_CPUTARGETLIST_GICD_SGIR (16)
+/*lint -esym(621, OKL4_ASM_WIDTH_CPUTARGETLIST_GICD_SGIR) */
+#define OKL4_ASM_WIDTH_CPUTARGETLIST_GICD_SGIR (8)
+/*lint -esym(621, OKL4_ASM_MASK_TARGETLISTFILTER_GICD_SGIR) */
+#define OKL4_ASM_MASK_TARGETLISTFILTER_GICD_SGIR (3 << 24)
+/*lint -esym(621, OKL4_ASM_SHIFT_TARGETLISTFILTER_GICD_SGIR) */
+#define OKL4_ASM_SHIFT_TARGETLISTFILTER_GICD_SGIR (24)
+/*lint -esym(621, OKL4_ASM_WIDTH_TARGETLISTFILTER_GICD_SGIR) */
+#define OKL4_ASM_WIDTH_TARGETLISTFILTER_GICD_SGIR (2)
+
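+/*
+ * Illustrative sketch (editor's note, not generated output): a GICD_SGIR
+ * value is assembled from the fields above, with the target-list-filter
+ * field presumably taking the okl4_sgi_target_t values.  For example,
+ * raising SGI 3 on CPUs 0 and 1 via an explicit target list:
+ *
+ *   uint32_t sgir = 0;
+ *   sgir |= (3u << OKL4_ASM_SHIFT_SGIINTID_GICD_SGIR)
+ *           & OKL4_ASM_MASK_SGIINTID_GICD_SGIR;
+ *   sgir |= (0x3u << OKL4_ASM_SHIFT_CPUTARGETLIST_GICD_SGIR)
+ *           & OKL4_ASM_MASK_CPUTARGETLIST_GICD_SGIR;
+ *   sgir |= ((uint32_t)OKL4_ASM_SGI_TARGET_LISTED
+ *            << OKL4_ASM_SHIFT_TARGETLISTFILTER_GICD_SGIR)
+ *           & OKL4_ASM_MASK_TARGETLISTFILTER_GICD_SGIR;
+ */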
+
+/**
+ *  okl4_link_role_t
+ **/
+/*lint -esym(621, OKL4_ASM_LINK_ROLE_SYMMETRIC) */
+#define OKL4_ASM_LINK_ROLE_SYMMETRIC (0x0)
+/*lint -esym(621, OKL4_ASM_LINK_ROLE_SERVER) */
+#define OKL4_ASM_LINK_ROLE_SERVER (0x1)
+/*lint -esym(621, OKL4_ASM_LINK_ROLE_CLIENT) */
+#define OKL4_ASM_LINK_ROLE_CLIENT (0x2)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_LINK_ROLE_MAX) */
+#define OKL4_ASM_LINK_ROLE_MAX (0x2)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_LINK_ROLE_INVALID) */
+#define OKL4_ASM_LINK_ROLE_INVALID (0xffffffff)
+
+/**
+ *  okl4_link_transport_type_t
+ **/
+/*lint -esym(621, OKL4_ASM_LINK_TRANSPORT_TYPE_SHARED_BUFFER) */
+#define OKL4_ASM_LINK_TRANSPORT_TYPE_SHARED_BUFFER (0x0)
+/*lint -esym(621, OKL4_ASM_LINK_TRANSPORT_TYPE_AXONS) */
+#define OKL4_ASM_LINK_TRANSPORT_TYPE_AXONS (0x1)
+/*lint -esym(621, OKL4_ASM_LINK_TRANSPORT_TYPE_PIPES) */
+#define OKL4_ASM_LINK_TRANSPORT_TYPE_PIPES (0x2)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_LINK_TRANSPORT_TYPE_MAX) */
+#define OKL4_ASM_LINK_TRANSPORT_TYPE_MAX (0x2)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_LINK_TRANSPORT_TYPE_INVALID) */
+#define OKL4_ASM_LINK_TRANSPORT_TYPE_INVALID (0xffffffff)
+
+/**
+ *  okl4_mmu_lookup_index_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_ERROR_MMU_LOOKUP_INDEX) */
+#define OKL4_ASM_MASK_ERROR_MMU_LOOKUP_INDEX (65535)
+/*lint -esym(621, OKL4_ASM_SHIFT_ERROR_MMU_LOOKUP_INDEX) */
+#define OKL4_ASM_SHIFT_ERROR_MMU_LOOKUP_INDEX (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_ERROR_MMU_LOOKUP_INDEX) */
+#define OKL4_ASM_WIDTH_ERROR_MMU_LOOKUP_INDEX (16)
+/*lint -esym(621, OKL4_ASM_MASK_INDEX_MMU_LOOKUP_INDEX) */
+#define OKL4_ASM_MASK_INDEX_MMU_LOOKUP_INDEX (65535 << 16)
+/*lint -esym(621, OKL4_ASM_SHIFT_INDEX_MMU_LOOKUP_INDEX) */
+#define OKL4_ASM_SHIFT_INDEX_MMU_LOOKUP_INDEX (16)
+/*lint -esym(621, OKL4_ASM_WIDTH_INDEX_MMU_LOOKUP_INDEX) */
+#define OKL4_ASM_WIDTH_INDEX_MMU_LOOKUP_INDEX (16)
+
+
+/**
+ *  okl4_mmu_lookup_size_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_SEG_INDEX_MMU_LOOKUP_SIZE) */
+#define OKL4_ASM_MASK_SEG_INDEX_MMU_LOOKUP_SIZE (1023)
+/*lint -esym(621, OKL4_ASM_SHIFT_SEG_INDEX_MMU_LOOKUP_SIZE) */
+#define OKL4_ASM_SHIFT_SEG_INDEX_MMU_LOOKUP_SIZE (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_SEG_INDEX_MMU_LOOKUP_SIZE) */
+#define OKL4_ASM_WIDTH_SEG_INDEX_MMU_LOOKUP_SIZE (10)
+/*lint -esym(621, OKL4_ASM_MASK_SIZE_10_MMU_LOOKUP_SIZE) */
+#define OKL4_ASM_MASK_SIZE_10_MMU_LOOKUP_SIZE (18014398509481983 << 10)
+/*lint -esym(621, OKL4_ASM_SHIFT_SIZE_10_MMU_LOOKUP_SIZE) */
+#define OKL4_ASM_SHIFT_SIZE_10_MMU_LOOKUP_SIZE (10)
+/*lint -esym(621, OKL4_ASM_WIDTH_SIZE_10_MMU_LOOKUP_SIZE) */
+#define OKL4_ASM_WIDTH_SIZE_10_MMU_LOOKUP_SIZE (54)
+
+
+/**
+ *  okl4_nanoseconds_t
+ **/
+/** Timer period upper bound is (1 << 55) ns */
+/*lint -esym(621, OKL4_TIMER_MAX_PERIOD_NS) */
+#define OKL4_TIMER_MAX_PERIOD_NS (36028797018963968)
+
+/** Timer period lower bound is 1000000 ns */
+/*lint -esym(621, OKL4_TIMER_MIN_PERIOD_NS) */
+#define OKL4_TIMER_MIN_PERIOD_NS (1000000)
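+/* Illustrative note: 2^55 ns is roughly 36,028,797 seconds, i.e. about
+   417 days, and the 1,000,000 ns lower bound corresponds to a minimum
+   timer period of 1 ms. */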
+
+/**
+ *  _okl4_page_attribute_t
+ **/
+
+
+/*lint -esym(621, _OKL4_ASM_MASK_RWX_PAGE_ATTRIBUTE) */
+#define _OKL4_ASM_MASK_RWX_PAGE_ATTRIBUTE (7)
+/*lint -esym(621, _OKL4_ASM_SHIFT_RWX_PAGE_ATTRIBUTE) */
+#define _OKL4_ASM_SHIFT_RWX_PAGE_ATTRIBUTE (0)
+/*lint -esym(621, _OKL4_ASM_WIDTH_RWX_PAGE_ATTRIBUTE) */
+#define _OKL4_ASM_WIDTH_RWX_PAGE_ATTRIBUTE (3)
+/*lint -esym(621, _OKL4_ASM_MASK_ATTRIB_PAGE_ATTRIBUTE) */
+#define _OKL4_ASM_MASK_ATTRIB_PAGE_ATTRIBUTE (268435455 << 4)
+/*lint -esym(621, _OKL4_ASM_SHIFT_ATTRIB_PAGE_ATTRIBUTE) */
+#define _OKL4_ASM_SHIFT_ATTRIB_PAGE_ATTRIBUTE (4)
+/*lint -esym(621, _OKL4_ASM_WIDTH_ATTRIB_PAGE_ATTRIBUTE) */
+#define _OKL4_ASM_WIDTH_ATTRIB_PAGE_ATTRIBUTE (28)
+
+
+/**
+ *  okl4_pipe_control_t
+ **/
+
+/*lint -esym(621, OKL4_PIPE_CONTROL_OP_CLR_HALTED) */
+#define OKL4_PIPE_CONTROL_OP_CLR_HALTED (4)
+/*lint -esym(621, OKL4_PIPE_CONTROL_OP_RESET) */
+#define OKL4_PIPE_CONTROL_OP_RESET (0)
+/*lint -esym(621, OKL4_PIPE_CONTROL_OP_SET_HALTED) */
+#define OKL4_PIPE_CONTROL_OP_SET_HALTED (3)
+/*lint -esym(621, OKL4_PIPE_CONTROL_OP_SET_RX_READY) */
+#define OKL4_PIPE_CONTROL_OP_SET_RX_READY (2)
+/*lint -esym(621, OKL4_PIPE_CONTROL_OP_SET_TX_READY) */
+#define OKL4_PIPE_CONTROL_OP_SET_TX_READY (1)
+
+/*lint -esym(621, OKL4_ASM_MASK_DO_OP_PIPE_CONTROL) */
+#define OKL4_ASM_MASK_DO_OP_PIPE_CONTROL (1)
+/*lint -esym(621, OKL4_ASM_SHIFT_DO_OP_PIPE_CONTROL) */
+#define OKL4_ASM_SHIFT_DO_OP_PIPE_CONTROL (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_DO_OP_PIPE_CONTROL) */
+#define OKL4_ASM_WIDTH_DO_OP_PIPE_CONTROL (1)
+/*lint -esym(621, OKL4_ASM_MASK_OPERATION_PIPE_CONTROL) */
+#define OKL4_ASM_MASK_OPERATION_PIPE_CONTROL (7 << 1)
+/*lint -esym(621, OKL4_ASM_SHIFT_OPERATION_PIPE_CONTROL) */
+#define OKL4_ASM_SHIFT_OPERATION_PIPE_CONTROL (1)
+/*lint -esym(621, OKL4_ASM_WIDTH_OPERATION_PIPE_CONTROL) */
+#define OKL4_ASM_WIDTH_OPERATION_PIPE_CONTROL (3)
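+/* Illustrative sketch (an assumed usage, not taken from the generated
+ * header): given the field definitions above, one plausible way to compose
+ * a pipe-control word is to place an operation in the OPERATION field and
+ * set the DO_OP bit, e.g.
+ *   (OKL4_PIPE_CONTROL_OP_SET_TX_READY << OKL4_ASM_SHIFT_OPERATION_PIPE_CONTROL)
+ *       | OKL4_ASM_MASK_DO_OP_PIPE_CONTROL
+ * which encodes "perform the set-TX-ready operation" in the low four bits.
+ */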
+
+
+/**
+ *  okl4_pipe_state_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_RESET_PIPE_STATE) */
+#define OKL4_ASM_MASK_RESET_PIPE_STATE (1)
+/*lint -esym(621, OKL4_ASM_SHIFT_RESET_PIPE_STATE) */
+#define OKL4_ASM_SHIFT_RESET_PIPE_STATE (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_RESET_PIPE_STATE) */
+#define OKL4_ASM_WIDTH_RESET_PIPE_STATE (1)
+/*lint -esym(621, OKL4_ASM_MASK_HALTED_PIPE_STATE) */
+#define OKL4_ASM_MASK_HALTED_PIPE_STATE (1 << 1)
+/*lint -esym(621, OKL4_ASM_SHIFT_HALTED_PIPE_STATE) */
+#define OKL4_ASM_SHIFT_HALTED_PIPE_STATE (1)
+/*lint -esym(621, OKL4_ASM_WIDTH_HALTED_PIPE_STATE) */
+#define OKL4_ASM_WIDTH_HALTED_PIPE_STATE (1)
+/*lint -esym(621, OKL4_ASM_MASK_RX_READY_PIPE_STATE) */
+#define OKL4_ASM_MASK_RX_READY_PIPE_STATE (1 << 2)
+/*lint -esym(621, OKL4_ASM_SHIFT_RX_READY_PIPE_STATE) */
+#define OKL4_ASM_SHIFT_RX_READY_PIPE_STATE (2)
+/*lint -esym(621, OKL4_ASM_WIDTH_RX_READY_PIPE_STATE) */
+#define OKL4_ASM_WIDTH_RX_READY_PIPE_STATE (1)
+/*lint -esym(621, OKL4_ASM_MASK_TX_READY_PIPE_STATE) */
+#define OKL4_ASM_MASK_TX_READY_PIPE_STATE (1 << 3)
+/*lint -esym(621, OKL4_ASM_SHIFT_TX_READY_PIPE_STATE) */
+#define OKL4_ASM_SHIFT_TX_READY_PIPE_STATE (3)
+/*lint -esym(621, OKL4_ASM_WIDTH_TX_READY_PIPE_STATE) */
+#define OKL4_ASM_WIDTH_TX_READY_PIPE_STATE (1)
+/*lint -esym(621, OKL4_ASM_MASK_RX_AVAILABLE_PIPE_STATE) */
+#define OKL4_ASM_MASK_RX_AVAILABLE_PIPE_STATE (1 << 4)
+/*lint -esym(621, OKL4_ASM_SHIFT_RX_AVAILABLE_PIPE_STATE) */
+#define OKL4_ASM_SHIFT_RX_AVAILABLE_PIPE_STATE (4)
+/*lint -esym(621, OKL4_ASM_WIDTH_RX_AVAILABLE_PIPE_STATE) */
+#define OKL4_ASM_WIDTH_RX_AVAILABLE_PIPE_STATE (1)
+/*lint -esym(621, OKL4_ASM_MASK_TX_AVAILABLE_PIPE_STATE) */
+#define OKL4_ASM_MASK_TX_AVAILABLE_PIPE_STATE (1 << 5)
+/*lint -esym(621, OKL4_ASM_SHIFT_TX_AVAILABLE_PIPE_STATE) */
+#define OKL4_ASM_SHIFT_TX_AVAILABLE_PIPE_STATE (5)
+/*lint -esym(621, OKL4_ASM_WIDTH_TX_AVAILABLE_PIPE_STATE) */
+#define OKL4_ASM_WIDTH_TX_AVAILABLE_PIPE_STATE (1)
+/*lint -esym(621, OKL4_ASM_MASK_WAITING_PIPE_STATE) */
+#define OKL4_ASM_MASK_WAITING_PIPE_STATE (1 << 6)
+/*lint -esym(621, OKL4_ASM_SHIFT_WAITING_PIPE_STATE) */
+#define OKL4_ASM_SHIFT_WAITING_PIPE_STATE (6)
+/*lint -esym(621, OKL4_ASM_WIDTH_WAITING_PIPE_STATE) */
+#define OKL4_ASM_WIDTH_WAITING_PIPE_STATE (1)
+/*lint -esym(621, OKL4_ASM_MASK_OVERQUOTA_PIPE_STATE) */
+#define OKL4_ASM_MASK_OVERQUOTA_PIPE_STATE (1 << 7)
+/*lint -esym(621, OKL4_ASM_SHIFT_OVERQUOTA_PIPE_STATE) */
+#define OKL4_ASM_SHIFT_OVERQUOTA_PIPE_STATE (7)
+/*lint -esym(621, OKL4_ASM_WIDTH_OVERQUOTA_PIPE_STATE) */
+#define OKL4_ASM_WIDTH_OVERQUOTA_PIPE_STATE (1)
+
+
+/**
+ *  okl4_power_state_t
+ **/
+/*lint -esym(621, OKL4_POWER_STATE_IDLE) */
+#define OKL4_POWER_STATE_IDLE (0)
+
+/*lint -esym(621, OKL4_POWER_STATE_PLATFORM_BASE) */
+#define OKL4_POWER_STATE_PLATFORM_BASE (256)
+
+/*lint -esym(621, OKL4_POWER_STATE_POWEROFF) */
+#define OKL4_POWER_STATE_POWEROFF (1)
+
+/**
+ *  okl4_register_set_t
+ **/
+/*lint -esym(621, OKL4_ASM_REGISTER_SET_CPU_REGS) */
+#define OKL4_ASM_REGISTER_SET_CPU_REGS (0x0)
+/*lint -esym(621, OKL4_ASM_REGISTER_SET_VFP_REGS) */
+#define OKL4_ASM_REGISTER_SET_VFP_REGS (0x1)
+/*lint -esym(621, OKL4_ASM_REGISTER_SET_VFP_CTRL_REGS) */
+#define OKL4_ASM_REGISTER_SET_VFP_CTRL_REGS (0x2)
+/*lint -esym(621, OKL4_ASM_REGISTER_SET_VFP64_REGS) */
+#define OKL4_ASM_REGISTER_SET_VFP64_REGS (0x3)
+/*lint -esym(621, OKL4_ASM_REGISTER_SET_VFP128_REGS) */
+#define OKL4_ASM_REGISTER_SET_VFP128_REGS (0x4)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_REGISTER_SET_MAX) */
+#define OKL4_ASM_REGISTER_SET_MAX (0x4)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_REGISTER_SET_INVALID) */
+#define OKL4_ASM_REGISTER_SET_INVALID (0xffffffff)
+
+/**
+ *  okl4_register_and_set_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_OFFSET_REGISTER_AND_SET) */
+#define OKL4_ASM_MASK_OFFSET_REGISTER_AND_SET (65535)
+/*lint -esym(621, OKL4_ASM_SHIFT_OFFSET_REGISTER_AND_SET) */
+#define OKL4_ASM_SHIFT_OFFSET_REGISTER_AND_SET (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_OFFSET_REGISTER_AND_SET) */
+#define OKL4_ASM_WIDTH_OFFSET_REGISTER_AND_SET (16)
+/*lint -esym(621, OKL4_ASM_MASK_SET_REGISTER_AND_SET) */
+#define OKL4_ASM_MASK_SET_REGISTER_AND_SET (65535 << 16)
+/*lint -esym(621, OKL4_ASM_SHIFT_SET_REGISTER_AND_SET) */
+#define OKL4_ASM_SHIFT_SET_REGISTER_AND_SET (16)
+/*lint -esym(621, OKL4_ASM_WIDTH_SET_REGISTER_AND_SET) */
+#define OKL4_ASM_WIDTH_SET_REGISTER_AND_SET (16)
+
+
+/**
+ *  okl4_scheduler_virq_flags_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS) */
+#define OKL4_ASM_MASK_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_SHIFT_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS) */
+#define OKL4_ASM_SHIFT_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS) */
+#define OKL4_ASM_WIDTH_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS (1)
+
+
+/**
+ *  okl4_sdk_version_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_MAINTENANCE_SDK_VERSION) */
+#define OKL4_ASM_MASK_MAINTENANCE_SDK_VERSION (63)
+/*lint -esym(621, OKL4_ASM_SHIFT_MAINTENANCE_SDK_VERSION) */
+#define OKL4_ASM_SHIFT_MAINTENANCE_SDK_VERSION (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_MAINTENANCE_SDK_VERSION) */
+#define OKL4_ASM_WIDTH_MAINTENANCE_SDK_VERSION (6)
+/*lint -esym(621, OKL4_ASM_MASK_RELEASE_SDK_VERSION) */
+#define OKL4_ASM_MASK_RELEASE_SDK_VERSION (255 << 8)
+/*lint -esym(621, OKL4_ASM_SHIFT_RELEASE_SDK_VERSION) */
+#define OKL4_ASM_SHIFT_RELEASE_SDK_VERSION (8)
+/*lint -esym(621, OKL4_ASM_WIDTH_RELEASE_SDK_VERSION) */
+#define OKL4_ASM_WIDTH_RELEASE_SDK_VERSION (8)
+/*lint -esym(621, OKL4_ASM_MASK_MINOR_SDK_VERSION) */
+#define OKL4_ASM_MASK_MINOR_SDK_VERSION (63 << 16)
+/*lint -esym(621, OKL4_ASM_SHIFT_MINOR_SDK_VERSION) */
+#define OKL4_ASM_SHIFT_MINOR_SDK_VERSION (16)
+/*lint -esym(621, OKL4_ASM_WIDTH_MINOR_SDK_VERSION) */
+#define OKL4_ASM_WIDTH_MINOR_SDK_VERSION (6)
+/*lint -esym(621, OKL4_ASM_MASK_MAJOR_SDK_VERSION) */
+#define OKL4_ASM_MASK_MAJOR_SDK_VERSION (15 << 24)
+/*lint -esym(621, OKL4_ASM_SHIFT_MAJOR_SDK_VERSION) */
+#define OKL4_ASM_SHIFT_MAJOR_SDK_VERSION (24)
+/*lint -esym(621, OKL4_ASM_WIDTH_MAJOR_SDK_VERSION) */
+#define OKL4_ASM_WIDTH_MAJOR_SDK_VERSION (4)
+/*lint -esym(621, OKL4_ASM_MASK_RES0_FLAG_SDK_VERSION) */
+#define OKL4_ASM_MASK_RES0_FLAG_SDK_VERSION (1 << 28)
+/*lint -esym(621, OKL4_ASM_SHIFT_RES0_FLAG_SDK_VERSION) */
+#define OKL4_ASM_SHIFT_RES0_FLAG_SDK_VERSION (28)
+/*lint -esym(621, OKL4_ASM_WIDTH_RES0_FLAG_SDK_VERSION) */
+#define OKL4_ASM_WIDTH_RES0_FLAG_SDK_VERSION (1)
+/*lint -esym(621, OKL4_ASM_MASK_DEV_FLAG_SDK_VERSION) */
+#define OKL4_ASM_MASK_DEV_FLAG_SDK_VERSION (1 << 30)
+/*lint -esym(621, OKL4_ASM_SHIFT_DEV_FLAG_SDK_VERSION) */
+#define OKL4_ASM_SHIFT_DEV_FLAG_SDK_VERSION (30)
+/*lint -esym(621, OKL4_ASM_WIDTH_DEV_FLAG_SDK_VERSION) */
+#define OKL4_ASM_WIDTH_DEV_FLAG_SDK_VERSION (1)
+/*lint -esym(621, OKL4_ASM_MASK_FORMAT_FLAG_SDK_VERSION) */
+#define OKL4_ASM_MASK_FORMAT_FLAG_SDK_VERSION (1 << 31)
+/*lint -esym(621, OKL4_ASM_SHIFT_FORMAT_FLAG_SDK_VERSION) */
+#define OKL4_ASM_SHIFT_FORMAT_FLAG_SDK_VERSION (31)
+/*lint -esym(621, OKL4_ASM_WIDTH_FORMAT_FLAG_SDK_VERSION) */
+#define OKL4_ASM_WIDTH_FORMAT_FLAG_SDK_VERSION (1)
+
+
+/**
+ *  okl4_timer_flags_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_ACTIVE_TIMER_FLAGS) */
+#define OKL4_ASM_MASK_ACTIVE_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_SHIFT_ACTIVE_TIMER_FLAGS) */
+#define OKL4_ASM_SHIFT_ACTIVE_TIMER_FLAGS (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_ACTIVE_TIMER_FLAGS) */
+#define OKL4_ASM_WIDTH_ACTIVE_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_MASK_PERIODIC_TIMER_FLAGS) */
+#define OKL4_ASM_MASK_PERIODIC_TIMER_FLAGS (1 << 1)
+/*lint -esym(621, OKL4_ASM_SHIFT_PERIODIC_TIMER_FLAGS) */
+#define OKL4_ASM_SHIFT_PERIODIC_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_WIDTH_PERIODIC_TIMER_FLAGS) */
+#define OKL4_ASM_WIDTH_PERIODIC_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_MASK_ABSOLUTE_TIMER_FLAGS) */
+#define OKL4_ASM_MASK_ABSOLUTE_TIMER_FLAGS (1 << 2)
+/*lint -esym(621, OKL4_ASM_SHIFT_ABSOLUTE_TIMER_FLAGS) */
+#define OKL4_ASM_SHIFT_ABSOLUTE_TIMER_FLAGS (2)
+/*lint -esym(621, OKL4_ASM_WIDTH_ABSOLUTE_TIMER_FLAGS) */
+#define OKL4_ASM_WIDTH_ABSOLUTE_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_MASK_UNITS_TIMER_FLAGS) */
+#define OKL4_ASM_MASK_UNITS_TIMER_FLAGS (1 << 3)
+/*lint -esym(621, OKL4_ASM_SHIFT_UNITS_TIMER_FLAGS) */
+#define OKL4_ASM_SHIFT_UNITS_TIMER_FLAGS (3)
+/*lint -esym(621, OKL4_ASM_WIDTH_UNITS_TIMER_FLAGS) */
+#define OKL4_ASM_WIDTH_UNITS_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_MASK_ALIGN_TIMER_FLAGS) */
+#define OKL4_ASM_MASK_ALIGN_TIMER_FLAGS (1 << 4)
+/*lint -esym(621, OKL4_ASM_SHIFT_ALIGN_TIMER_FLAGS) */
+#define OKL4_ASM_SHIFT_ALIGN_TIMER_FLAGS (4)
+/*lint -esym(621, OKL4_ASM_WIDTH_ALIGN_TIMER_FLAGS) */
+#define OKL4_ASM_WIDTH_ALIGN_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_MASK_WATCHDOG_TIMER_FLAGS) */
+#define OKL4_ASM_MASK_WATCHDOG_TIMER_FLAGS (1 << 5)
+/*lint -esym(621, OKL4_ASM_SHIFT_WATCHDOG_TIMER_FLAGS) */
+#define OKL4_ASM_SHIFT_WATCHDOG_TIMER_FLAGS (5)
+/*lint -esym(621, OKL4_ASM_WIDTH_WATCHDOG_TIMER_FLAGS) */
+#define OKL4_ASM_WIDTH_WATCHDOG_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_MASK_RELOAD_TIMER_FLAGS) */
+#define OKL4_ASM_MASK_RELOAD_TIMER_FLAGS (1 << 30)
+/*lint -esym(621, OKL4_ASM_SHIFT_RELOAD_TIMER_FLAGS) */
+#define OKL4_ASM_SHIFT_RELOAD_TIMER_FLAGS (30)
+/*lint -esym(621, OKL4_ASM_WIDTH_RELOAD_TIMER_FLAGS) */
+#define OKL4_ASM_WIDTH_RELOAD_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_MASK_TIMESLICE_TIMER_FLAGS) */
+#define OKL4_ASM_MASK_TIMESLICE_TIMER_FLAGS (1 << 31)
+/*lint -esym(621, OKL4_ASM_SHIFT_TIMESLICE_TIMER_FLAGS) */
+#define OKL4_ASM_SHIFT_TIMESLICE_TIMER_FLAGS (31)
+/*lint -esym(621, OKL4_ASM_WIDTH_TIMESLICE_TIMER_FLAGS) */
+#define OKL4_ASM_WIDTH_TIMESLICE_TIMER_FLAGS (1)
+
+
+/**
+ *  okl4_tracepoint_class_t
+ **/
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_CLASS_THREAD_STATE) */
+#define OKL4_ASM_TRACEPOINT_CLASS_THREAD_STATE (0x0)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_CLASS_SYSCALLS) */
+#define OKL4_ASM_TRACEPOINT_CLASS_SYSCALLS (0x1)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_CLASS_PRIMARY) */
+#define OKL4_ASM_TRACEPOINT_CLASS_PRIMARY (0x2)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_CLASS_SECONDARY) */
+#define OKL4_ASM_TRACEPOINT_CLASS_SECONDARY (0x3)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_CLASS_TERTIARY) */
+#define OKL4_ASM_TRACEPOINT_CLASS_TERTIARY (0x4)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_CLASS_MAX) */
+#define OKL4_ASM_TRACEPOINT_CLASS_MAX (0x4)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_CLASS_INVALID) */
+#define OKL4_ASM_TRACEPOINT_CLASS_INVALID (0xffffffff)
+
+/**
+ *  _okl4_tracepoint_desc_t
+ **/
+
+
+/*lint -esym(621, _OKL4_ASM_MASK_ID_TRACEPOINT_DESC) */
+#define _OKL4_ASM_MASK_ID_TRACEPOINT_DESC (255)
+/*lint -esym(621, _OKL4_ASM_SHIFT_ID_TRACEPOINT_DESC) */
+#define _OKL4_ASM_SHIFT_ID_TRACEPOINT_DESC (0)
+/*lint -esym(621, _OKL4_ASM_WIDTH_ID_TRACEPOINT_DESC) */
+#define _OKL4_ASM_WIDTH_ID_TRACEPOINT_DESC (8)
+/*lint -esym(621, _OKL4_ASM_MASK_USER_TRACEPOINT_DESC) */
+#define _OKL4_ASM_MASK_USER_TRACEPOINT_DESC (1 << 8)
+/*lint -esym(621, _OKL4_ASM_SHIFT_USER_TRACEPOINT_DESC) */
+#define _OKL4_ASM_SHIFT_USER_TRACEPOINT_DESC (8)
+/*lint -esym(621, _OKL4_ASM_WIDTH_USER_TRACEPOINT_DESC) */
+#define _OKL4_ASM_WIDTH_USER_TRACEPOINT_DESC (1)
+/*lint -esym(621, _OKL4_ASM_MASK_BIN_TRACEPOINT_DESC) */
+#define _OKL4_ASM_MASK_BIN_TRACEPOINT_DESC (1 << 9)
+/*lint -esym(621, _OKL4_ASM_SHIFT_BIN_TRACEPOINT_DESC) */
+#define _OKL4_ASM_SHIFT_BIN_TRACEPOINT_DESC (9)
+/*lint -esym(621, _OKL4_ASM_WIDTH_BIN_TRACEPOINT_DESC) */
+#define _OKL4_ASM_WIDTH_BIN_TRACEPOINT_DESC (1)
+/*lint -esym(621, _OKL4_ASM_MASK_RECLEN_TRACEPOINT_DESC) */
+#define _OKL4_ASM_MASK_RECLEN_TRACEPOINT_DESC (63 << 10)
+/*lint -esym(621, _OKL4_ASM_SHIFT_RECLEN_TRACEPOINT_DESC) */
+#define _OKL4_ASM_SHIFT_RECLEN_TRACEPOINT_DESC (10)
+/*lint -esym(621, _OKL4_ASM_WIDTH_RECLEN_TRACEPOINT_DESC) */
+#define _OKL4_ASM_WIDTH_RECLEN_TRACEPOINT_DESC (6)
+/*lint -esym(621, _OKL4_ASM_MASK_CPUID_TRACEPOINT_DESC) */
+#define _OKL4_ASM_MASK_CPUID_TRACEPOINT_DESC (63 << 16)
+/*lint -esym(621, _OKL4_ASM_SHIFT_CPUID_TRACEPOINT_DESC) */
+#define _OKL4_ASM_SHIFT_CPUID_TRACEPOINT_DESC (16)
+/*lint -esym(621, _OKL4_ASM_WIDTH_CPUID_TRACEPOINT_DESC) */
+#define _OKL4_ASM_WIDTH_CPUID_TRACEPOINT_DESC (6)
+/*lint -esym(621, _OKL4_ASM_MASK_THREADID_TRACEPOINT_DESC) */
+#define _OKL4_ASM_MASK_THREADID_TRACEPOINT_DESC (63 << 22)
+/*lint -esym(621, _OKL4_ASM_SHIFT_THREADID_TRACEPOINT_DESC) */
+#define _OKL4_ASM_SHIFT_THREADID_TRACEPOINT_DESC (22)
+/*lint -esym(621, _OKL4_ASM_WIDTH_THREADID_TRACEPOINT_DESC) */
+#define _OKL4_ASM_WIDTH_THREADID_TRACEPOINT_DESC (6)
+/*lint -esym(621, _OKL4_ASM_MASK__R1_TRACEPOINT_DESC) */
+#define _OKL4_ASM_MASK__R1_TRACEPOINT_DESC (15 << 28)
+/*lint -esym(621, _OKL4_ASM_SHIFT__R1_TRACEPOINT_DESC) */
+#define _OKL4_ASM_SHIFT__R1_TRACEPOINT_DESC (28)
+/*lint -esym(621, _OKL4_ASM_WIDTH__R1_TRACEPOINT_DESC) */
+#define _OKL4_ASM_WIDTH__R1_TRACEPOINT_DESC (4)
+
+
+/**
+ *  _okl4_tracepoint_masks_t
+ **/
+
+
+/*lint -esym(621, _OKL4_ASM_MASK_CLASS_TRACEPOINT_MASKS) */
+#define _OKL4_ASM_MASK_CLASS_TRACEPOINT_MASKS (65535)
+/*lint -esym(621, _OKL4_ASM_SHIFT_CLASS_TRACEPOINT_MASKS) */
+#define _OKL4_ASM_SHIFT_CLASS_TRACEPOINT_MASKS (0)
+/*lint -esym(621, _OKL4_ASM_WIDTH_CLASS_TRACEPOINT_MASKS) */
+#define _OKL4_ASM_WIDTH_CLASS_TRACEPOINT_MASKS (16)
+/*lint -esym(621, _OKL4_ASM_MASK_SUBSYSTEM_TRACEPOINT_MASKS) */
+#define _OKL4_ASM_MASK_SUBSYSTEM_TRACEPOINT_MASKS (65535 << 16)
+/*lint -esym(621, _OKL4_ASM_SHIFT_SUBSYSTEM_TRACEPOINT_MASKS) */
+#define _OKL4_ASM_SHIFT_SUBSYSTEM_TRACEPOINT_MASKS (16)
+/*lint -esym(621, _OKL4_ASM_WIDTH_SUBSYSTEM_TRACEPOINT_MASKS) */
+#define _OKL4_ASM_WIDTH_SUBSYSTEM_TRACEPOINT_MASKS (16)
+
+
+/**
+ *  okl4_tracepoint_evt_t
+ **/
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SCH_SCHEDULER_FLAG_SET_RUNNABLE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SCH_SCHEDULER_FLAG_SET_RUNNABLE (0x0)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SCH_SCHEDULER_FLAG_CLEAR_RUNNABLE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SCH_SCHEDULER_FLAG_CLEAR_RUNNABLE (0x1)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SCH_CONTEXT_SWITCH) */
+#define OKL4_ASM_TRACEPOINT_EVT_SCH_CONTEXT_SWITCH (0x2)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_KDB_SET_OBJECT_NAME) */
+#define OKL4_ASM_TRACEPOINT_EVT_KDB_SET_OBJECT_NAME (0x3)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_PROCESS_RECV) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_PROCESS_RECV (0x4)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_HALTED) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_HALTED (0x5)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_RECV_AREA) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_RECV_AREA (0x6)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_RECV_QUEUE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_RECV_QUEUE (0x7)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_RECV_SEGMENT) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_RECV_SEGMENT (0x8)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_SEND_AREA) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_SEND_AREA (0x9)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_SEND_QUEUE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_SEND_QUEUE (0xa)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_SEND_SEGMENT) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_SEND_SEGMENT (0xb)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_TRIGGER_SEND) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_TRIGGER_SEND (0xc)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_ACK) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_ACK (0xd)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_PRIVATE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_PRIVATE (0xe)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_SHARED) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_SHARED (0xf)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_DETACH) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_DETACH (0x10)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_DIST_ENABLE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_DIST_ENABLE (0x11)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_EOI) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_EOI (0x12)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_GET_HIGHEST_PRIORITY_PENDING) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_GET_HIGHEST_PRIORITY_PENDING (0x13)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_GET_PAYLOAD) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_GET_PAYLOAD (0x14)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_LIMITS) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_LIMITS (0x15)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_MASK) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_MASK (0x16)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_RAISE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_RAISE (0x17)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_BINARY_POINT) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_BINARY_POINT (0x18)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONFIG) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONFIG (0x19)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONTROL) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONTROL (0x1a)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY (0x1b)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY_MASK) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY_MASK (0x1c)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_TARGETS) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_TARGETS (0x1d)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_UNMASK) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_UNMASK (0x1e)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_KDB_INTERACT) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_KDB_INTERACT (0x1f)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_KDB_SET_OBJECT_NAME) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_KDB_SET_OBJECT_NAME (0x20)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_KSP_PROCEDURE_CALL) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_KSP_PROCEDURE_CALL (0x21)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_ATTACH_SEGMENT) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_ATTACH_SEGMENT (0x22)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_DETACH_SEGMENT) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_DETACH_SEGMENT (0x23)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE (0x24)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE_PN) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE_PN (0x25)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PAGE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PAGE (0x26)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PN) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PN (0x27)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_MAP_PAGE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_MAP_PAGE (0x28)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_MAP_PN) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_MAP_PN (0x29)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UNMAP_PAGE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UNMAP_PAGE (0x2a)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UNMAP_PN) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UNMAP_PN (0x2b)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_ATTRS) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_ATTRS (0x2c)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_PERMS) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_PERMS (0x2d)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_ATTRS) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_ATTRS (0x2e)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_PERMS) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_PERMS (0x2f)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_PERFORMANCE_NULL_SYSCALL) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_PERFORMANCE_NULL_SYSCALL (0x30)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_PIPE_CONTROL) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_PIPE_CONTROL (0x31)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_PIPE_RECV) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_PIPE_RECV (0x32)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_PIPE_SEND) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_PIPE_SEND (0x33)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_PRIORITY_WAIVE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_PRIORITY_WAIVE (0x34)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTER) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTER (0x35)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTERS) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTERS (0x36)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_READ_MEMORY32) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_READ_MEMORY32 (0x37)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTER) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTER (0x38)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTERS) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTERS (0x39)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_WRITE_MEMORY32) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_WRITE_MEMORY32 (0x3a)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_STATUS_SUSPENDED) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_STATUS_SUSPENDED (0x3b)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_WATCH_SUSPENDED) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_WATCH_SUSPENDED (0x3c)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_DISABLE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_DISABLE (0x3d)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_ENABLE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_ENABLE (0x3e)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_GET_DATA) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_GET_DATA (0x3f)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_DISABLE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_DISABLE (0x40)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_ENABLE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_ENABLE (0x41)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_GET_DATA) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_GET_DATA (0x42)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULER_SUSPEND) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULER_SUSPEND (0x43)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_CANCEL) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_CANCEL (0x44)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_GET_RESOLUTION) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_GET_RESOLUTION (0x45)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_GET_TIME) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_GET_TIME (0x46)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_QUERY) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_QUERY (0x47)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_START) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_START (0x48)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_TRACEBUFFER_SYNC) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_TRACEBUFFER_SYNC (0x49)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_RESET) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_RESET (0x4a)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_START) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_START (0x4b)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_STOP) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_STOP (0x4c)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_SWITCH_MODE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_SWITCH_MODE (0x4d)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_SYNC_SEV) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_SYNC_SEV (0x4e)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_SYNC_WFE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_SYNC_WFE (0x4f)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_VINTERRUPT_CLEAR_AND_RAISE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_VINTERRUPT_CLEAR_AND_RAISE (0x50)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_VINTERRUPT_MODIFY) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_VINTERRUPT_MODIFY (0x51)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_VINTERRUPT_RAISE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_VINTERRUPT_RAISE (0x52)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_MAX) */
+#define OKL4_ASM_TRACEPOINT_EVT_MAX (0x52)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_INVALID) */
+#define OKL4_ASM_TRACEPOINT_EVT_INVALID (0xffffffff)
+
+/**
+ *  okl4_tracepoint_level_t
+ **/
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_LEVEL_DEBUG) */
+#define OKL4_ASM_TRACEPOINT_LEVEL_DEBUG (0x0)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_LEVEL_INFO) */
+#define OKL4_ASM_TRACEPOINT_LEVEL_INFO (0x1)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_LEVEL_WARN) */
+#define OKL4_ASM_TRACEPOINT_LEVEL_WARN (0x2)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_LEVEL_CRITICAL) */
+#define OKL4_ASM_TRACEPOINT_LEVEL_CRITICAL (0x3)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_LEVEL_MAX) */
+#define OKL4_ASM_TRACEPOINT_LEVEL_MAX (0x3)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_LEVEL_INVALID) */
+#define OKL4_ASM_TRACEPOINT_LEVEL_INVALID (0xffffffff)
+
+/**
+ *  okl4_tracepoint_subsystem_t
+ **/
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_SUBSYSTEM_SCHEDULER) */
+#define OKL4_ASM_TRACEPOINT_SUBSYSTEM_SCHEDULER (0x0)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_SUBSYSTEM_TRACE) */
+#define OKL4_ASM_TRACEPOINT_SUBSYSTEM_TRACE (0x1)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_SUBSYSTEM_CORE) */
+#define OKL4_ASM_TRACEPOINT_SUBSYSTEM_CORE (0x2)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_SUBSYSTEM_MAX) */
+#define OKL4_ASM_TRACEPOINT_SUBSYSTEM_MAX (0x2)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_SUBSYSTEM_INVALID) */
+#define OKL4_ASM_TRACEPOINT_SUBSYSTEM_INVALID (0xffffffff)
+
+/**
+ *  okl4_vfp_ops_t
+ **/
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_VFP_OPS_MAX) */
+#define OKL4_ASM_VFP_OPS_MAX (0x0)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_VFP_OPS_INVALID) */
+#define OKL4_ASM_VFP_OPS_INVALID (0xffffffff)
+
+/**
+ *  okl4_vservices_transport_type_t
+ **/
+/*lint -esym(621, OKL4_ASM_VSERVICES_TRANSPORT_TYPE_AXON) */
+#define OKL4_ASM_VSERVICES_TRANSPORT_TYPE_AXON (0x0)
+/*lint -esym(621, OKL4_ASM_VSERVICES_TRANSPORT_TYPE_SHARED_BUFFER) */
+#define OKL4_ASM_VSERVICES_TRANSPORT_TYPE_SHARED_BUFFER (0x1)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_VSERVICES_TRANSPORT_TYPE_MAX) */
+#define OKL4_ASM_VSERVICES_TRANSPORT_TYPE_MAX (0x1)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_VSERVICES_TRANSPORT_TYPE_INVALID) */
+#define OKL4_ASM_VSERVICES_TRANSPORT_TYPE_INVALID (0xffffffff)
+
+
+#endif /* !ASSEMBLY */
+
+#endif /* __AUTO__MICROVISOR_TYPES_H__ */
+/** @} */
+/** @} */
diff --git a/include/microvisor/microvisor.h b/include/microvisor/microvisor.h
new file mode 100644
index 0000000..3bb8d64
--- /dev/null
+++ b/include/microvisor/microvisor.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _MICROVISOR_H_
+#define _MICROVISOR_H_
+
+/**
+ * @defgroup lib_microvisor The Microvisor Library
+ *
+ * @{
+ *
+ * The Microvisor Library is the primary low-level API between the OKL4
+ * Microvisor and a Cell application or guest-OS. It also provides certain
+ * common data types such as structure definitions used in these interactions.
+ *
+ */
+
+/**
+ * Temporarily define _Bool to allow C++ compilation of
+ * OKL code that makes use of it.
+ */
+#if defined(__cplusplus) && !defined(_Bool)
+#define _OKL4_CPP_BOOL
+#define _Bool bool
+#endif
+
+#define OKL4_INLINE static inline
+
+#if defined(_lint) || defined(_splint)
+#define OKL4_FORCE_INLINE static
+#else
+#define OKL4_FORCE_INLINE static inline __attribute__((always_inline))
+#endif
+
+#include <microvisor/kernel/types.h>
+#include <microvisor/kernel/microvisor.h>
+#include <microvisor/kernel/syscalls.h>
+#include <microvisor/kernel/offsets.h>
+
+/** @} */
+
+/**
+ * Remove temporary definition of _Bool if it was defined
+ */
+#if defined(_OKL4_CPP_BOOL)
+#undef _Bool
+#undef _OKL4_CPP_BOOL
+#endif
+
+#endif /* _MICROVISOR_H_ */
diff --git a/include/soc/qcom/qseecomi.h b/include/soc/qcom/qseecomi.h
new file mode 100644
index 0000000..b85a3f2
--- /dev/null
+++ b/include/soc/qcom/qseecomi.h
@@ -0,0 +1,723 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __QSEECOMI_H_
+#define __QSEECOMI_H_
+
+#include <linux/qseecom.h>
+
+#define QSEECOM_KEY_ID_SIZE   32
+
+#define QSEOS_RESULT_FAIL_SEND_CMD_NO_THREAD  -19   /*0xFFFFFFED*/
+#define QSEOS_RESULT_FAIL_UNSUPPORTED_CE_PIPE -63
+#define QSEOS_RESULT_FAIL_KS_OP               -64
+#define QSEOS_RESULT_FAIL_KEY_ID_EXISTS       -65
+#define QSEOS_RESULT_FAIL_MAX_KEYS            -66
+#define QSEOS_RESULT_FAIL_SAVE_KS             -67
+#define QSEOS_RESULT_FAIL_LOAD_KS             -68
+#define QSEOS_RESULT_FAIL_KS_ALREADY_DONE     -69
+#define QSEOS_RESULT_FAIL_KEY_ID_DNE          -70
+#define QSEOS_RESULT_FAIL_INCORRECT_PSWD      -71
+#define QSEOS_RESULT_FAIL_MAX_ATTEMPT         -72
+#define QSEOS_RESULT_FAIL_PENDING_OPERATION   -73
+
+enum qseecom_command_scm_resp_type {
+	QSEOS_APP_ID = 0xEE01,
+	QSEOS_LISTENER_ID
+};
+
+enum qseecom_qceos_cmd_id {
+	QSEOS_APP_START_COMMAND      = 0x01,
+	QSEOS_APP_SHUTDOWN_COMMAND,
+	QSEOS_APP_LOOKUP_COMMAND,
+	QSEOS_REGISTER_LISTENER,
+	QSEOS_DEREGISTER_LISTENER,
+	QSEOS_CLIENT_SEND_DATA_COMMAND,
+	QSEOS_LISTENER_DATA_RSP_COMMAND,
+	QSEOS_LOAD_EXTERNAL_ELF_COMMAND,
+	QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND,
+	QSEOS_GET_APP_STATE_COMMAND,
+	QSEOS_LOAD_SERV_IMAGE_COMMAND,
+	QSEOS_UNLOAD_SERV_IMAGE_COMMAND,
+	QSEOS_APP_REGION_NOTIFICATION,
+	QSEOS_REGISTER_LOG_BUF_COMMAND,
+	QSEOS_RPMB_PROVISION_KEY_COMMAND,
+	QSEOS_RPMB_ERASE_COMMAND,
+	QSEOS_GENERATE_KEY  = 0x11,
+	QSEOS_DELETE_KEY,
+	QSEOS_MAX_KEY_COUNT,
+	QSEOS_SET_KEY,
+	QSEOS_UPDATE_KEY_USERINFO,
+	QSEOS_TEE_OPEN_SESSION,
+	QSEOS_TEE_INVOKE_COMMAND,
+	QSEOS_TEE_INVOKE_MODFD_COMMAND = QSEOS_TEE_INVOKE_COMMAND,
+	QSEOS_TEE_CLOSE_SESSION,
+	QSEOS_TEE_REQUEST_CANCELLATION,
+	QSEOS_CONTINUE_BLOCKED_REQ_COMMAND,
+	QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND = 0x1B,
+	QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST = 0x1C,
+	QSEOS_TEE_OPEN_SESSION_WHITELIST = 0x1D,
+	QSEOS_TEE_INVOKE_COMMAND_WHITELIST = 0x1E,
+	QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST = 0x1F,
+	QSEOS_FSM_LTEOTA_REQ_CMD = 0x109,
+	QSEOS_FSM_LTEOTA_REQ_RSP_CMD = 0x110,
+	QSEOS_FSM_IKE_REQ_CMD = 0x203,
+	QSEOS_FSM_IKE_REQ_RSP_CMD = 0x204,
+	QSEOS_FSM_OEM_FUSE_WRITE_ROW = 0x301,
+	QSEOS_FSM_OEM_FUSE_READ_ROW = 0x302,
+	QSEOS_FSM_ENCFS_REQ_CMD = 0x403,
+	QSEOS_FSM_ENCFS_REQ_RSP_CMD = 0x404,
+
+	QSEOS_CMD_MAX     = 0xEFFFFFFF
+};
+
+enum qseecom_qceos_cmd_status {
+	QSEOS_RESULT_SUCCESS = 0,
+	QSEOS_RESULT_INCOMPLETE,
+	QSEOS_RESULT_BLOCKED_ON_LISTENER,
+	QSEOS_RESULT_FAILURE  = 0xFFFFFFFF
+};
+
+enum qseecom_pipe_type {
+	QSEOS_PIPE_ENC = 0x1,
+	QSEOS_PIPE_ENC_XTS = 0x2,
+	QSEOS_PIPE_AUTH = 0x4,
+	QSEOS_PIPE_ENUM_FILL = 0x7FFFFFFF
+};
+
+/* QSEE Reentrancy support phase */
+enum qseecom_qsee_reentrancy_phase {
+	QSEE_REENTRANCY_PHASE_0 = 0,
+	QSEE_REENTRANCY_PHASE_1,
+	QSEE_REENTRANCY_PHASE_2,
+	QSEE_REENTRANCY_PHASE_3,
+	QSEE_REENTRANCY_PHASE_MAX = 0xFF
+};
+
+struct qsee_apps_region_info_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t addr;
+	uint32_t size;
+} __attribute__((__packed__));
+
+struct qsee_apps_region_info_64bit_ireq {
+	uint32_t qsee_cmd_id;
+	uint64_t addr;
+	uint32_t size;
+} __attribute__((__packed__));
+
+struct qseecom_check_app_ireq {
+	uint32_t qsee_cmd_id;
+	char     app_name[MAX_APP_NAME_SIZE];
+} __attribute__((__packed__));
+
+struct qseecom_load_app_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t mdt_len;		/* Length of the mdt file */
+	uint32_t img_len;		/* Length of .bxx and .mdt files */
+	uint32_t phy_addr;		/* phy addr of the start of image */
+	char     app_name[MAX_APP_NAME_SIZE];	/* application name*/
+} __attribute__((__packed__));
+
+struct qseecom_load_app_64bit_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t mdt_len;
+	uint32_t img_len;
+	uint64_t phy_addr;
+	char     app_name[MAX_APP_NAME_SIZE];
+} __attribute__((__packed__));
+
+struct qseecom_unload_app_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t  app_id;
+} __attribute__((__packed__));
+
+struct qseecom_load_lib_image_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t mdt_len;
+	uint32_t img_len;
+	uint32_t phy_addr;
+} __attribute__((__packed__));
+
+struct qseecom_load_lib_image_64bit_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t mdt_len;
+	uint32_t img_len;
+	uint64_t phy_addr;
+} __attribute__((__packed__));
+
+struct qseecom_unload_lib_image_ireq {
+	uint32_t qsee_cmd_id;
+} __attribute__((__packed__));
+
+struct qseecom_register_listener_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t listener_id;
+	uint32_t sb_ptr;
+	uint32_t sb_len;
+} __attribute__((__packed__));
+
+struct qseecom_register_listener_64bit_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t listener_id;
+	uint64_t sb_ptr;
+	uint32_t sb_len;
+} __attribute__((__packed__));
+
+struct qseecom_unregister_listener_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t  listener_id;
+} __attribute__((__packed__));
+
+struct qseecom_client_send_data_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t app_id;
+	uint32_t req_ptr;
+	uint32_t req_len;
+	uint32_t rsp_ptr;/* First 4 bytes should be the return status */
+	uint32_t rsp_len;
+	uint32_t sglistinfo_ptr;
+	uint32_t sglistinfo_len;
+} __attribute__((__packed__));
+
+struct qseecom_client_send_data_64bit_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t app_id;
+	uint64_t req_ptr;
+	uint32_t req_len;
+	uint64_t rsp_ptr;
+	uint32_t rsp_len;
+	uint64_t sglistinfo_ptr;
+	uint32_t sglistinfo_len;
+} __attribute__((__packed__));
+
+struct qseecom_reg_log_buf_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t phy_addr;
+	uint32_t len;
+} __attribute__((__packed__));
+
+struct qseecom_reg_log_buf_64bit_ireq {
+	uint32_t qsee_cmd_id;
+	uint64_t phy_addr;
+	uint32_t len;
+} __attribute__((__packed__));
+
+/* send_data resp */
+struct qseecom_client_listener_data_irsp {
+	uint32_t qsee_cmd_id;
+	uint32_t listener_id;
+	uint32_t status;
+	uint32_t sglistinfo_ptr;
+	uint32_t sglistinfo_len;
+} __attribute__((__packed__));
+
+struct qseecom_client_listener_data_64bit_irsp {
+	uint32_t qsee_cmd_id;
+	uint32_t listener_id;
+	uint32_t status;
+	uint64_t sglistinfo_ptr;
+	uint32_t sglistinfo_len;
+} __attribute__((__packed__));
+
+/*
+ * struct qseecom_command_scm_resp - qseecom response buffer
+ * @result: status of the command, from enum qseecom_qceos_cmd_status
+ * @resp_type: type of the response, from enum qseecom_command_scm_resp_type
+ * @data: response data, e.g. an application ID or a listener ID,
+ *        depending on @resp_type
+ */
+struct qseecom_command_scm_resp {
+	uint32_t result;
+	enum qseecom_command_scm_resp_type resp_type;
+	unsigned int data;
+} __attribute__((__packed__));
+
+struct qseecom_rpmb_provision_key {
+	uint32_t key_type;
+};
+
+struct qseecom_client_send_service_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t key_type; /* in */
+	unsigned int req_len; /* in */
+	uint32_t rsp_ptr; /* in/out */
+	unsigned int rsp_len; /* in/out */
+} __attribute__((__packed__));
+
+struct qseecom_client_send_service_64bit_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t key_type;
+	unsigned int req_len;
+	uint64_t rsp_ptr;
+	unsigned int rsp_len;
+} __attribute__((__packed__));
+
+struct qseecom_key_generate_ireq {
+	uint32_t qsee_command_id;
+	uint32_t flags;
+	uint8_t key_id[QSEECOM_KEY_ID_SIZE];
+	uint8_t hash32[QSEECOM_HASH_SIZE];
+} __attribute__((__packed__));
+
+struct qseecom_key_select_ireq {
+	uint32_t qsee_command_id;
+	uint32_t ce;
+	uint32_t pipe;
+	uint32_t pipe_type;
+	uint32_t flags;
+	uint8_t key_id[QSEECOM_KEY_ID_SIZE];
+	uint8_t hash32[QSEECOM_HASH_SIZE];
+} __attribute__((__packed__));
+
+struct qseecom_key_delete_ireq {
+	uint32_t qsee_command_id;
+	uint32_t flags;
+	uint8_t key_id[QSEECOM_KEY_ID_SIZE];
+	uint8_t hash32[QSEECOM_HASH_SIZE];
+
+} __attribute__((__packed__));
+
+struct qseecom_key_userinfo_update_ireq {
+	uint32_t qsee_command_id;
+	uint32_t flags;
+	uint8_t key_id[QSEECOM_KEY_ID_SIZE];
+	uint8_t current_hash32[QSEECOM_HASH_SIZE];
+	uint8_t new_hash32[QSEECOM_HASH_SIZE];
+} __attribute__((__packed__));
+
+struct qseecom_key_max_count_query_ireq {
+	uint32_t flags;
+} __attribute__((__packed__));
+
+struct qseecom_key_max_count_query_irsp {
+	uint32_t max_key_count;
+} __attribute__((__packed__));
+
+struct qseecom_qteec_ireq {
+	uint32_t    qsee_cmd_id;
+	uint32_t    app_id;
+	uint32_t    req_ptr;
+	uint32_t    req_len;
+	uint32_t    resp_ptr;
+	uint32_t    resp_len;
+	uint32_t    sglistinfo_ptr;
+	uint32_t    sglistinfo_len;
+} __attribute__((__packed__));
+
+struct qseecom_qteec_64bit_ireq {
+	uint32_t    qsee_cmd_id;
+	uint32_t    app_id;
+	uint64_t    req_ptr;
+	uint32_t    req_len;
+	uint64_t    resp_ptr;
+	uint32_t    resp_len;
+	uint64_t    sglistinfo_ptr;
+	uint32_t    sglistinfo_len;
+} __attribute__((__packed__));
+
+struct qseecom_client_send_fsm_key_req {
+	uint32_t qsee_cmd_id;
+	uint32_t req_ptr;
+	uint32_t req_len;
+	uint32_t rsp_ptr;
+	uint32_t rsp_len;
+} __attribute__((__packed__));
+
+struct qseecom_continue_blocked_request_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t app_or_session_id; /*legacy: app_id; smcinvoke: session_id*/
+} __attribute__((__packed__));
+
+/**********      ARMV8 SMC INTERFACE TZ MACROS     ******************/
+
+#define TZ_SVC_APP_MGR                   1     /* Application management */
+#define TZ_SVC_LISTENER                  2     /* Listener service management */
+#define TZ_SVC_EXTERNAL                  3     /* External image loading */
+#define TZ_SVC_RPMB                      4     /* RPMB */
+#define TZ_SVC_KEYSTORE                  5     /* Keystore management */
+#define TZ_SVC_ES                        16    /* Enterprise Security */
+#define TZ_SVC_MDTP                      18    /* Mobile Device Theft Protection */
+
+/*----------------------------------------------------------------------------
+ * Owning Entity IDs (defined by ARM SMC doc)
+ * ---------------------------------------------------------------------------
+ */
+#define TZ_OWNER_ARM                     0     /** ARM Architecture call ID */
+#define TZ_OWNER_CPU                     1     /** CPU service call ID */
+#define TZ_OWNER_SIP                     2     /** SIP service call ID */
+#define TZ_OWNER_OEM                     3     /** OEM service call ID */
+#define TZ_OWNER_STD                     4     /** Standard service call ID */
+
+/** Values 5-47 are reserved for future use */
+
+/** Trusted Application call IDs */
+#define TZ_OWNER_TZ_APPS                 48
+#define TZ_OWNER_TZ_APPS_RESERVED        49
+/** Trusted OS Call IDs */
+#define TZ_OWNER_QSEE_OS                 50
+#define TZ_OWNER_MOBI_OS                 51
+#define TZ_OWNER_OS_RESERVED_3           52
+#define TZ_OWNER_OS_RESERVED_4           53
+#define TZ_OWNER_OS_RESERVED_5           54
+#define TZ_OWNER_OS_RESERVED_6           55
+#define TZ_OWNER_OS_RESERVED_7           56
+#define TZ_OWNER_OS_RESERVED_8           57
+#define TZ_OWNER_OS_RESERVED_9           58
+#define TZ_OWNER_OS_RESERVED_10          59
+#define TZ_OWNER_OS_RESERVED_11          60
+#define TZ_OWNER_OS_RESERVED_12          61
+#define TZ_OWNER_OS_RESERVED_13          62
+#define TZ_OWNER_OS_RESERVED_14          63
+
+#define TZ_SVC_INFO                      6    /* Misc. information services */
+
+/** Trusted Application call groups */
+#define TZ_SVC_APP_ID_PLACEHOLDER        0    /* SVC bits will contain App ID */
+
+/** General helper macro to create a bitmask from bits low to high. */
+#define TZ_MASK_BITS(h, l)     ((0xffffffff >> (32 - ((h - l) + 1))) << l)
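+/* For example, TZ_MASK_BITS(3, 0) expands to 0x0000000f and
+ * TZ_MASK_BITS(29, 24) to 0x3f000000, the owner-ID mask used by
+ * TZ_SYSCALL_OWNER_ID further below.
+ */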
+
+/*
+ * Macro used to define an SMC ID based on the owner ID,
+ * service ID, and function number.
+ */
+#define TZ_SYSCALL_CREATE_SMC_ID(o, s, f) \
+	((uint32_t)((((o & 0x3f) << 24) | (s & 0xff) << 8) | (f & 0xff)))
+
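+/* For instance, TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x01)
+ * evaluates to ((0x32 << 24) | (0x01 << 8) | 0x01) == 0x32000101, which is the
+ * value of TZ_OS_APP_START_ID defined further below.
+ */
+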
+#define TZ_SYSCALL_PARAM_NARGS_MASK  TZ_MASK_BITS(3, 0)
+#define TZ_SYSCALL_PARAM_TYPE_MASK   TZ_MASK_BITS(1, 0)
+
+#define TZ_SYSCALL_CREATE_PARAM_ID(nargs, p1, p2, p3, \
+	p4, p5, p6, p7, p8, p9, p10) \
+	((nargs&TZ_SYSCALL_PARAM_NARGS_MASK)+ \
+	((p1&TZ_SYSCALL_PARAM_TYPE_MASK)<<4)+ \
+	((p2&TZ_SYSCALL_PARAM_TYPE_MASK)<<6)+ \
+	((p3&TZ_SYSCALL_PARAM_TYPE_MASK)<<8)+ \
+	((p4&TZ_SYSCALL_PARAM_TYPE_MASK)<<10)+ \
+	((p5&TZ_SYSCALL_PARAM_TYPE_MASK)<<12)+ \
+	((p6&TZ_SYSCALL_PARAM_TYPE_MASK)<<14)+ \
+	((p7&TZ_SYSCALL_PARAM_TYPE_MASK)<<16)+ \
+	((p8&TZ_SYSCALL_PARAM_TYPE_MASK)<<18)+ \
+	((p9&TZ_SYSCALL_PARAM_TYPE_MASK)<<20)+ \
+	((p10&TZ_SYSCALL_PARAM_TYPE_MASK)<<22))
+
+/*
+ * Macros used to create the Parameter ID associated with the syscall
+ */
+#define TZ_SYSCALL_CREATE_PARAM_ID_0 0
+#define TZ_SYSCALL_CREATE_PARAM_ID_1(p1) \
+	TZ_SYSCALL_CREATE_PARAM_ID(1, p1, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_2(p1, p2) \
+	TZ_SYSCALL_CREATE_PARAM_ID(2, p1, p2, 0, 0, 0, 0, 0, 0, 0, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_3(p1, p2, p3) \
+	TZ_SYSCALL_CREATE_PARAM_ID(3, p1, p2, p3, 0, 0, 0, 0, 0, 0, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_4(p1, p2, p3, p4) \
+	TZ_SYSCALL_CREATE_PARAM_ID(4, p1, p2, p3, p4, 0, 0, 0, 0, 0, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_5(p1, p2, p3, p4, p5) \
+	TZ_SYSCALL_CREATE_PARAM_ID(5, p1, p2, p3, p4, p5, 0, 0, 0, 0, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_6(p1, p2, p3, p4, p5, p6) \
+	TZ_SYSCALL_CREATE_PARAM_ID(6, p1, p2, p3, p4, p5, p6, 0, 0, 0, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_7(p1, p2, p3, p4, p5, p6, p7) \
+	TZ_SYSCALL_CREATE_PARAM_ID(7, p1, p2, p3, p4, p5, p6, p7, 0, 0, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_8(p1, p2, p3, p4, p5, p6, p7, p8) \
+	TZ_SYSCALL_CREATE_PARAM_ID(8, p1, p2, p3, p4, p5, p6, p7, p8, 0, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_9(p1, p2, p3, p4, p5, p6, p7, p8, p9) \
+	TZ_SYSCALL_CREATE_PARAM_ID(9, p1, p2, p3, p4, p5, p6, p7, p8, p9, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_10(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10) \
+	TZ_SYSCALL_CREATE_PARAM_ID(10, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10)
+
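+/* For instance, TZ_OS_APP_START_ID_PARAM_ID below uses
+ * TZ_SYSCALL_CREATE_PARAM_ID_3 with three TZ_SYSCALL_PARAM_TYPE_VAL
+ * arguments, which packs to (3 & 0xf) with all type fields zero,
+ * i.e. the value 3.
+ */
+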
+/*
+ * Macro used to obtain the Parameter ID associated with the syscall
+ */
+#define TZ_SYSCALL_GET_PARAM_ID(CMD_ID)        CMD_ID ## _PARAM_ID
+
+/** Helper macro to extract the owning entity from the SMC ID. */
+#define TZ_SYSCALL_OWNER_ID(r0)   ((r0 & TZ_MASK_BITS(29, 24)) >> 24)
+
+/** Helper macro for checking whether an owning entity is of type trusted OS. */
+#define IS_OWNER_TRUSTED_OS(owner_id) \
+			(((owner_id >= 50) && (owner_id <= 63)) ? 1:0)
+
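+/* For instance, applying TZ_SYSCALL_OWNER_ID() to the SMC ID 0x32000101
+ * yields (0x32000101 & 0x3f000000) >> 24 == 50 (TZ_OWNER_QSEE_OS), so
+ * IS_OWNER_TRUSTED_OS() returns 1 for that ID.
+ */
+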
+#define TZ_SYSCALL_PARAM_TYPE_VAL              0x0     /* type of value */
+#define TZ_SYSCALL_PARAM_TYPE_BUF_RO           0x1     /* type of buffer RO */
+#define TZ_SYSCALL_PARAM_TYPE_BUF_RW           0x2     /* type of buffer RW */
+
+#define TZ_OS_APP_START_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x01)
+
+#define TZ_OS_APP_START_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_3( \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL, \
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_APP_SHUTDOWN_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x02)
+
+#define TZ_OS_APP_SHUTDOWN_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_APP_LOOKUP_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x03)
+
+#define TZ_OS_APP_LOOKUP_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_2( \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_APP_GET_STATE_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x04)
+
+#define TZ_OS_APP_GET_STATE_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_APP_REGION_NOTIFICATION_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x05)
+
+#define TZ_OS_APP_REGION_NOTIFICATION_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_2( \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_REGISTER_LOG_BUFFER_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x06)
+
+#define TZ_OS_REGISTER_LOG_BUFFER_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_2( \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_LOAD_SERVICES_IMAGE_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x07)
+
+#define TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_3( \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL, \
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_UNLOAD_SERVICES_IMAGE_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x08)
+
+#define TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_0
+
+#define TZ_OS_REGISTER_LISTENER_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x01)
+
+#define TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x06)
+
+#define TZ_OS_REGISTER_LISTENER_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_3( \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_DEREGISTER_LISTENER_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x02)
+
+#define TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_LISTENER_RESPONSE_HANDLER_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x03)
+
+#define TZ_OS_LISTENER_RESPONSE_HANDLER_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_2( \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_LOAD_EXTERNAL_IMAGE_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_EXTERNAL, 0x01)
+
+#define TZ_OS_LOAD_EXTERNAL_IMAGE_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_3( \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL, \
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_APP_QSAPP_SEND_DATA_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS, \
+	TZ_SVC_APP_ID_PLACEHOLDER, 0x01)
+
+
+#define TZ_APP_QSAPP_SEND_DATA_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_5( \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_EXTERNAL, 0x02)
+
+#define TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_0
+
+#define TZ_INFO_IS_SVC_AVAILABLE_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_INFO, 0x01)
+
+#define TZ_INFO_IS_SVC_AVAILABLE_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_INFO_GET_FEATURE_VERSION_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_INFO, 0x03)
+
+#define TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_RPMB_PROVISION_KEY_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_RPMB, 0x01)
+
+#define TZ_OS_RPMB_PROVISION_KEY_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_RPMB_ERASE_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_RPMB, 0x02)
+
+#define TZ_OS_RPMB_ERASE_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_0
+
+#define TZ_OS_RPMB_CHECK_PROV_STATUS_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_RPMB, 0x03)
+
+#define TZ_OS_RPMB_CHECK_PROV_STATUS_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_0
+
+#define TZ_OS_KS_GEN_KEY_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_KEYSTORE, 0x01)
+
+#define TZ_OS_KS_GEN_KEY_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_2( \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_KS_DEL_KEY_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_KEYSTORE, 0x02)
+
+#define TZ_OS_KS_DEL_KEY_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_2( \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_KS_GET_MAX_KEYS_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_KEYSTORE, 0x03)
+
+#define TZ_OS_KS_GET_MAX_KEYS_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_KS_SET_PIPE_KEY_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_KEYSTORE, 0x04)
+
+#define TZ_OS_KS_SET_PIPE_KEY_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_2( \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_KS_UPDATE_KEY_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_KEYSTORE, 0x05)
+
+#define TZ_OS_KS_UPDATE_KEY_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_2( \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_ES_SAVE_PARTITION_HASH_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_ES, 0x01)
+
+#define TZ_ES_SAVE_PARTITION_HASH_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_3( \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_APP_GPAPP_OPEN_SESSION_ID					\
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS,			\
+	TZ_SVC_APP_ID_PLACEHOLDER, 0x02)
+
+#define TZ_APP_GPAPP_OPEN_SESSION_ID_PARAM_ID				\
+	TZ_SYSCALL_CREATE_PARAM_ID_5(					\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_APP_GPAPP_CLOSE_SESSION_ID					\
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS,			\
+	TZ_SVC_APP_ID_PLACEHOLDER, 0x03)
+
+#define TZ_APP_GPAPP_CLOSE_SESSION_ID_PARAM_ID				\
+	TZ_SYSCALL_CREATE_PARAM_ID_5(					\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_APP_GPAPP_INVOKE_COMMAND_ID					\
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS,			\
+	TZ_SVC_APP_ID_PLACEHOLDER, 0x04)
+
+#define TZ_APP_GPAPP_INVOKE_COMMAND_ID_PARAM_ID				\
+	TZ_SYSCALL_CREATE_PARAM_ID_5(					\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_APP_GPAPP_REQUEST_CANCELLATION_ID				\
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS,			\
+	TZ_SVC_APP_ID_PLACEHOLDER, 0x05)
+
+#define TZ_APP_GPAPP_REQUEST_CANCELLATION_ID_PARAM_ID			\
+	TZ_SYSCALL_CREATE_PARAM_ID_5(					\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_MDTP_CIPHER_DIP_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_MDTP, 0x1)
+
+#define TZ_MDTP_CIPHER_DIP_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_5( \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RO, TZ_SYSCALL_PARAM_TYPE_VAL, \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL, \
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_CONTINUE_BLOCKED_REQUEST_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x04)
+
+#define TZ_OS_CONTINUE_BLOCKED_REQUEST_SMCINVOKE_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x07)
+
+#define TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS, \
+	TZ_SVC_APP_ID_PLACEHOLDER, 0x06)
+
+#define TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_7( \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID			\
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS,			\
+	TZ_SVC_APP_ID_PLACEHOLDER, 0x07)
+
+#define TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID_PARAM_ID		\
+	TZ_SYSCALL_CREATE_PARAM_ID_7(					\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID			\
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS,			\
+	TZ_SVC_APP_ID_PLACEHOLDER, 0x09)
+
+#define TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID_PARAM_ID		\
+	TZ_SYSCALL_CREATE_PARAM_ID_7(					\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x05)
+
+#define TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_4( \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL, \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#endif /* __QSEECOMI_H_ */
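
Each TZ syscall above is described by a pair of macros: the SMC ID that selects
the call, and a PARAM_ID that encodes how many arguments follow and whether each
one is a value or a buffer. A minimal usage sketch of that pairing, assuming a
hypothetical tz_invoke() helper in place of the platform's real SCM call routine:

	struct tz_request {
		uint32_t smc_id;	/* e.g. TZ_OS_KS_GEN_KEY_ID */
		uint32_t param_id;	/* e.g. TZ_OS_KS_GEN_KEY_ID_PARAM_ID */
		uint64_t args[4];	/* arguments described by param_id */
	};

	/* Hypothetical stand-in for the platform's secure monitor call. */
	extern int tz_invoke(struct tz_request *req);

	static int tz_keystore_gen_key(void *key_buf, size_t key_buf_len)
	{
		struct tz_request req = {
			.smc_id   = TZ_OS_KS_GEN_KEY_ID,
			.param_id = TZ_OS_KS_GEN_KEY_ID_PARAM_ID,
			/* PARAM_ID_2(BUF_RW, VAL): a buffer pointer and its length */
			.args     = { (uint64_t)(uintptr_t)key_buf, key_buf_len },
		};

		return tz_invoke(&req);
	}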
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index ca2787d..6527884 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -11,3 +11,7 @@
 ifeq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/uapi/asm/kvm_para.h),)
 no-export-headers += kvm_para.h
 endif
+
+ifneq ($(VSERVICES_SUPPORT), "")
+include include/linux/Kbuild.vservices
+endif
diff --git a/include/uapi/linux/qseecom.h b/include/uapi/linux/qseecom.h
new file mode 100644
index 0000000..6dfe045
--- /dev/null
+++ b/include/uapi/linux/qseecom.h
@@ -0,0 +1,413 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2017, 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _UAPI_QSEECOM_H_
+#define _UAPI_QSEECOM_H_
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define MAX_ION_FD  4
+#define MAX_APP_NAME_SIZE  64
+#define QSEECOM_HASH_SIZE  32
+
+/* qseecom_ta_heap allocation retry delay (ms) and max attempt count */
+#define QSEECOM_TA_ION_ALLOCATE_DELAY           50
+#define QSEECOM_TA_ION_ALLOCATE_MAX_ATTEMP      20
+
+#define ICE_KEY_SIZE 32
+#define ICE_SALT_SIZE 32
+
+/*
+ * struct qseecom_register_listener_req -
+ *      for register listener ioctl request
+ * @listener_id - service id (shared between userspace and QSE)
+ * @ifd_data_fd - ion handle
+ * @virt_sb_base - shared buffer base in user space
+ * @sb_size - shared buffer size
+ */
+struct qseecom_register_listener_req {
+	uint32_t listener_id; /* in */
+	int32_t ifd_data_fd; /* in */
+	void *virt_sb_base; /* in */
+	uint32_t sb_size; /* in */
+};
+
+/*
+ * struct qseecom_send_cmd_req - for send command ioctl request
+ * @cmd_req_len - command buffer length
+ * @cmd_req_buf - command buffer
+ * @resp_len - response buffer length
+ * @resp_buf - response buffer
+ */
+struct qseecom_send_cmd_req {
+	void *cmd_req_buf; /* in */
+	unsigned int cmd_req_len; /* in */
+	void *resp_buf; /* in/out */
+	unsigned int resp_len; /* in/out */
+};
+
+/*
+ * struct qseecom_ion_fd_info - ion fd handle data information
+ * @fd - ion handle to some memory allocated in user space
+ * @cmd_buf_offset - command buffer offset
+ */
+struct qseecom_ion_fd_info {
+	int32_t fd;
+	uint32_t cmd_buf_offset;
+};
+/*
+ * struct qseecom_send_modfd_cmd_req - for send command ioctl request
+ * @cmd_req_len - command buffer length
+ * @cmd_req_buf - command buffer
+ * @resp_len - response buffer length
+ * @resp_buf - response buffer
+ * @ifd_data_fd - ion handle to memory allocated in user space
+ * @cmd_buf_offset - command buffer offset
+ */
+struct qseecom_send_modfd_cmd_req {
+	void *cmd_req_buf; /* in */
+	unsigned int cmd_req_len; /* in */
+	void *resp_buf; /* in/out */
+	unsigned int resp_len; /* in/out */
+	struct qseecom_ion_fd_info ifd_data[MAX_ION_FD];
+};
+
+/*
+ * struct qseecom_send_resp_req - signal to continue the send_cmd req.
+ * Used as a trigger from the HLOS service to notify QSEECOM that it is done
+ * with its operation, and to provide the response so that QSEECOM can
+ * continue the incomplete command execution.
+ * @resp_len - Length of the response
+ * @resp_buf - Response buffer where the response of the cmd should go.
+ */
+struct qseecom_send_resp_req {
+	void *resp_buf; /* in */
+	unsigned int resp_len; /* in */
+};
+
+/*
+ * struct qseecom_load_img_req - for sending image length information and
+ * an ion file descriptor to the qseecom driver, used for retrieving the ion
+ * file handle and, in turn, the physical address of the image location.
+ * @mdt_len - Length of the .mdt file in bytes.
+ * @img_len - Length of the .mdt + .b00 + .. + .bxx image files in bytes.
+ * @ifd_data_fd - Ion file descriptor used when allocating memory.
+ * @img_name - Name of the image.
+ * @app_arch - Architecture of the image, i.e. 32-bit or 64-bit app.
+ * @app_id - App ID returned by QSEE once the image is loaded.
+ */
+struct qseecom_load_img_req {
+	uint32_t mdt_len; /* in */
+	uint32_t img_len; /* in */
+	int32_t  ifd_data_fd; /* in */
+	char	 img_name[MAX_APP_NAME_SIZE]; /* in */
+	uint32_t app_arch; /* in */
+	uint32_t app_id; /* out*/
+};
+
+struct qseecom_set_sb_mem_param_req {
+	int32_t ifd_data_fd; /* in */
+	void *virt_sb_base; /* in */
+	uint32_t sb_len; /* in */
+};
+
+/*
+ * struct qseecom_qseos_version_req - get qseos version
+ * @qseos_version - version number
+ */
+struct qseecom_qseos_version_req {
+	unsigned int qseos_version; /* in */
+};
+
+/*
+ * struct qseecom_qseos_app_load_query - verify if an app is loaded in QSEE
+ * @app_name[MAX_APP_NAME_SIZE] - name of the app.
+ * @app_id - app id.
+ */
+struct qseecom_qseos_app_load_query {
+	char app_name[MAX_APP_NAME_SIZE]; /* in */
+	uint32_t app_id; /* out */
+	uint32_t app_arch;
+};
+
+struct qseecom_send_svc_cmd_req {
+	uint32_t cmd_id;
+	void *cmd_req_buf; /* in */
+	unsigned int cmd_req_len; /* in */
+	void *resp_buf; /* in/out */
+	unsigned int resp_len; /* in/out */
+};
+
+enum qseecom_key_management_usage_type {
+	QSEOS_KM_USAGE_DISK_ENCRYPTION = 0x01,
+	QSEOS_KM_USAGE_FILE_ENCRYPTION = 0x02,
+	QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION = 0x03,
+	QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION = 0x04,
+	QSEOS_KM_USAGE_MAX
+};
+
+struct qseecom_create_key_req {
+	unsigned char hash32[QSEECOM_HASH_SIZE];
+	enum qseecom_key_management_usage_type usage;
+};
+
+struct qseecom_wipe_key_req {
+	enum qseecom_key_management_usage_type usage;
+	int wipe_key_flag;/* 1->remove key from storage (along with clearing key) */
+			  /* 0->do not remove from storage (clear key only) */
+};
+
+struct qseecom_update_key_userinfo_req {
+	unsigned char current_hash32[QSEECOM_HASH_SIZE];
+	unsigned char new_hash32[QSEECOM_HASH_SIZE];
+	enum qseecom_key_management_usage_type usage;
+};
+
+#define SHA256_DIGEST_LENGTH	(256/8)
+/*
+ * struct qseecom_save_partition_hash_req
+ * @partition_id - partition id.
+ * @digest[SHA256_DIGEST_LENGTH] - sha256 digest.
+ */
+struct qseecom_save_partition_hash_req {
+	int partition_id; /* in */
+	char digest[SHA256_DIGEST_LENGTH]; /* in */
+};
+
+/*
+ * struct qseecom_is_es_activated_req
+ * @is_activated - 1=true , 0=false
+ */
+struct qseecom_is_es_activated_req {
+	int is_activated; /* out */
+};
+
+/*
+ * struct qseecom_mdtp_cipher_dip_req
+ * @in_buf - input buffer
+ * @in_buf_size - input buffer size
+ * @out_buf - output buffer
+ * @out_buf_size - output buffer size
+ * @direction - 0=encrypt, 1=decrypt
+ */
+struct qseecom_mdtp_cipher_dip_req {
+	uint8_t *in_buf;
+	uint32_t in_buf_size;
+	uint8_t *out_buf;
+	uint32_t out_buf_size;
+	uint32_t direction;
+};
+
+enum qseecom_bandwidth_request_mode {
+	INACTIVE = 0,
+	LOW,
+	MEDIUM,
+	HIGH,
+};
+
+/*
+ * struct qseecom_send_modfd_listener_resp - for send listener response ioctl
+ * request
+ * @resp_buf_ptr - response buffer
+ * @resp_len - response buffer length
+ * @ifd_data - ion fd handle data for memory allocated in user space
+ */
+struct qseecom_send_modfd_listener_resp {
+	void *resp_buf_ptr; /* in */
+	unsigned int resp_len; /* in */
+	struct qseecom_ion_fd_info ifd_data[MAX_ION_FD]; /* in */
+};
+
+struct qseecom_qteec_req {
+	void    *req_ptr;
+	uint32_t    req_len;
+	void    *resp_ptr;
+	uint32_t    resp_len;
+};
+
+struct qseecom_qteec_modfd_req {
+	void    *req_ptr;
+	uint32_t    req_len;
+	void    *resp_ptr;
+	uint32_t    resp_len;
+	struct qseecom_ion_fd_info ifd_data[MAX_ION_FD];
+};
+
+struct qseecom_sg_entry {
+	uint32_t phys_addr;
+	uint32_t len;
+};
+
+struct qseecom_sg_entry_64bit {
+	uint64_t phys_addr;
+	uint32_t len;
+} __attribute__ ((packed));
+
+/*
+ * sg list buf format version
+ * 1: Legacy format to support only 512 SG list entries
+ * 2: New format to support more than 512 entries
+ */
+#define QSEECOM_SG_LIST_BUF_FORMAT_VERSION_1	1
+#define QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2	2
+
+struct qseecom_sg_list_buf_hdr_64bit {
+	struct qseecom_sg_entry_64bit  blank_entry;	/* must be all 0 */
+	uint32_t version;		/* sg list buf format version */
+	uint64_t new_buf_phys_addr;	/* PA of new buffer */
+	uint32_t nents_total;		/* Total number of SG entries */
+} __attribute__ ((packed));
+
+#define QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT	\
+			sizeof(struct qseecom_sg_list_buf_hdr_64bit)
+
+#define MAX_CE_PIPE_PAIR_PER_UNIT 3
+#define INVALID_CE_INFO_UNIT_NUM 0xffffffff
+
+#define CE_PIPE_PAIR_USE_TYPE_FDE 0
+#define CE_PIPE_PAIR_USE_TYPE_PFE 1
+
+struct qseecom_ce_pipe_entry {
+	int valid;
+	unsigned int ce_num;
+	unsigned int ce_pipe_pair;
+};
+
+struct qseecom_ice_data_t {
+	int flag;
+};
+
+#define MAX_CE_INFO_HANDLE_SIZE 32
+struct qseecom_ce_info_req {
+	unsigned char handle[MAX_CE_INFO_HANDLE_SIZE];
+	unsigned int usage;
+	unsigned int unit_num;
+	unsigned int num_ce_pipe_entries;
+	struct qseecom_ce_pipe_entry ce_pipe_entry[MAX_CE_PIPE_PAIR_PER_UNIT];
+};
+
+struct qseecom_ice_key_data_t {
+	uint8_t key[ICE_KEY_SIZE];
+	uint32_t key_len;
+	uint8_t salt[ICE_SALT_SIZE];
+	uint32_t salt_len;
+};
+
+#define SG_ENTRY_SZ		sizeof(struct qseecom_sg_entry)
+#define SG_ENTRY_SZ_64BIT	sizeof(struct qseecom_sg_entry_64bit)
+
+struct file;
+
+
+#define QSEECOM_IOC_MAGIC    0x97
+
+
+#define QSEECOM_IOCTL_REGISTER_LISTENER_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 1, struct qseecom_register_listener_req)
+
+#define QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ \
+	_IO(QSEECOM_IOC_MAGIC, 2)
+
+#define QSEECOM_IOCTL_SEND_CMD_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 3, struct qseecom_send_cmd_req)
+
+#define QSEECOM_IOCTL_SEND_MODFD_CMD_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 4, struct qseecom_send_modfd_cmd_req)
+
+#define QSEECOM_IOCTL_RECEIVE_REQ \
+	_IO(QSEECOM_IOC_MAGIC, 5)
+
+#define QSEECOM_IOCTL_SEND_RESP_REQ \
+	_IO(QSEECOM_IOC_MAGIC, 6)
+
+#define QSEECOM_IOCTL_LOAD_APP_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 7, struct qseecom_load_img_req)
+
+#define QSEECOM_IOCTL_SET_MEM_PARAM_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 8, struct qseecom_set_sb_mem_param_req)
+
+#define QSEECOM_IOCTL_UNLOAD_APP_REQ \
+	_IO(QSEECOM_IOC_MAGIC, 9)
+
+#define QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 10, struct qseecom_qseos_version_req)
+
+#define QSEECOM_IOCTL_PERF_ENABLE_REQ \
+	_IO(QSEECOM_IOC_MAGIC, 11)
+
+#define QSEECOM_IOCTL_PERF_DISABLE_REQ \
+	_IO(QSEECOM_IOC_MAGIC, 12)
+
+#define QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 13, struct qseecom_load_img_req)
+
+#define QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ \
+	_IO(QSEECOM_IOC_MAGIC, 14)
+
+#define QSEECOM_IOCTL_APP_LOADED_QUERY_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 15, struct qseecom_qseos_app_load_query)
+
+#define QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 16, struct qseecom_send_svc_cmd_req)
+
+#define QSEECOM_IOCTL_CREATE_KEY_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 17, struct qseecom_create_key_req)
+
+#define QSEECOM_IOCTL_WIPE_KEY_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 18, struct qseecom_wipe_key_req)
+
+#define QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 19, struct qseecom_save_partition_hash_req)
+
+#define QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 20, struct qseecom_is_es_activated_req)
+
+#define QSEECOM_IOCTL_SEND_MODFD_RESP \
+	_IOWR(QSEECOM_IOC_MAGIC, 21, struct qseecom_send_modfd_listener_resp)
+
+#define QSEECOM_IOCTL_SET_BUS_SCALING_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 23, int)
+
+#define QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 24, struct qseecom_update_key_userinfo_req)
+
+#define QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 30, struct qseecom_qteec_modfd_req)
+
+#define QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 31, struct qseecom_qteec_req)
+
+#define QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 32, struct qseecom_qteec_modfd_req)
+
+#define QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 33, struct qseecom_qteec_modfd_req)
+
+#define QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 34, struct qseecom_mdtp_cipher_dip_req)
+
+#define QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 35, struct qseecom_send_modfd_cmd_req)
+
+#define QSEECOM_IOCTL_SEND_MODFD_RESP_64 \
+	_IOWR(QSEECOM_IOC_MAGIC, 36, struct qseecom_send_modfd_listener_resp)
+
+#define QSEECOM_IOCTL_GET_CE_PIPE_INFO \
+	_IOWR(QSEECOM_IOC_MAGIC, 40, struct qseecom_ce_info_req)
+
+#define QSEECOM_IOCTL_FREE_CE_PIPE_INFO \
+	_IOWR(QSEECOM_IOC_MAGIC, 41, struct qseecom_ce_info_req)
+
+#define QSEECOM_IOCTL_QUERY_CE_PIPE_INFO \
+	_IOWR(QSEECOM_IOC_MAGIC, 42, struct qseecom_ce_info_req)
+
+#define QSEECOM_IOCTL_SET_ICE_INFO \
+	_IOWR(QSEECOM_IOC_MAGIC, 43, struct qseecom_ice_data_t)
+
+#define QSEECOM_IOCTL_FBE_CLEAR_KEY \
+	_IOWR(QSEECOM_IOC_MAGIC, 44, struct qseecom_ice_key_data_t)
+
+#endif /* _UAPI_QSEECOM_H_ */
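
User space drives this interface with ioctl() calls on the qseecom character
device. A minimal sketch, assuming the device node is /dev/qseecom (the node
name is created by the driver, not defined in this header):

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/qseecom.h>

	int main(void)
	{
		struct qseecom_qseos_version_req ver = { 0 };
		int fd = open("/dev/qseecom", O_RDWR);	/* assumed node name */

		if (fd < 0)
			return 1;

		/* Query the QSEOS version using the ioctl defined above. */
		if (ioctl(fd, QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ, &ver) == 0)
			printf("qseos version: 0x%x\n", ver.qseos_version);

		close(fd);
		return 0;
	}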
diff --git a/include/vservices/Kbuild b/include/vservices/Kbuild
new file mode 100644
index 0000000..8b955fc
--- /dev/null
+++ b/include/vservices/Kbuild
@@ -0,0 +1,2 @@
+header-y += protocol/
+header-y += ioctl.h
diff --git a/include/vservices/buffer.h b/include/vservices/buffer.h
new file mode 100644
index 0000000..910aa07
--- /dev/null
+++ b/include/vservices/buffer.h
@@ -0,0 +1,239 @@
+/*
+ * include/vservices/buffer.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file defines simple wrapper types for strings and variable-size buffers
+ * that are stored inside Virtual Services message buffers.
+ */
+
+#ifndef _VSERVICES_BUFFER_H_
+#define _VSERVICES_BUFFER_H_
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+
+struct vs_mbuf;
+
+/**
+ * struct vs_string - Virtual Services fixed sized string type
+ * @ptr: String pointer
+ * @max_size: Maximum length of the string in bytes
+ *
+ * A handle to a possibly NUL-terminated string stored in a message buffer. If
+ * the size of the string is equal to max_size, the string is not NUL-terminated.
+ * If the protocol does not specify an encoding, the encoding is assumed to be
+ * UTF-8. Wide character encodings are not supported by this type; use struct
+ * vs_pbuf for wide character strings.
+ */
+struct vs_string {
+	char *ptr;
+	size_t max_size;
+};
+
+/**
+ * vs_string_copyout - Copy a Virtual Services string to a C string buffer.
+ * @dest: C string to copy to
+ * @src: Virtual Services string to copy from
+ * @max_size: Size of the destination buffer, including the NUL terminator.
+ *
+ * The behaviour is similar to strlcpy(): that is, the copied string
+ * is guaranteed not to exceed the specified size (including the NUL
+ * terminator byte), and is guaranteed to be NUL-terminated as long as
+ * the size is nonzero (unlike strncpy()).
+ *
+ * The return value is the size of the input string (even if the output was
+ * truncated); this is to make truncation easy to detect.
+ */
+static inline size_t
+vs_string_copyout(char *dest, const struct vs_string *src, size_t max_size)
+{
+	size_t src_len = strnlen(src->ptr, src->max_size);
+
+	if (max_size) {
+		size_t dest_len = min(src_len, max_size - 1);
+
+		memcpy(dest, src->ptr, dest_len);
+		dest[dest_len] = '\0';
+	}
+	return src_len;
+}
+
+/**
+ * vs_string_copyin_len - Copy a C string, up to a given length, into a Virtual
+ *                        Services string.
+ * @dest: Virtual Services string to copy to
+ * @src: C string to copy from
+ * @max_size: Maximum number of bytes to copy
+ *
+ * Returns the number of bytes copied, which may be less than the input
+ * string's length.
+ */
+static inline size_t
+vs_string_copyin_len(struct vs_string *dest, const char *src, size_t max_size)
+{
+	strncpy(dest->ptr, src, min(max_size, dest->max_size));
+
+	return strnlen(dest->ptr, dest->max_size);
+}
+
+/**
+ * vs_string_copyin - Copy a C string into a Virtual Services string.
+ * @dest: Virtual Services string to copy to
+ * @src: C string to copy from
+ *
+ * Returns the number of bytes copied, which may be less than the input
+ * string's length.
+ */
+static inline size_t
+vs_string_copyin(struct vs_string *dest, const char *src)
+{
+	return vs_string_copyin_len(dest, src, dest->max_size);
+}
+
+/**
+ * vs_string_length - Return the size of the string stored in a Virtual Services
+ *                    string.
+ * @str: Virtual Service string to get the length of
+ */
+static inline size_t
+vs_string_length(struct vs_string *str)
+{
+	return strnlen(str->ptr, str->max_size);
+}
+
+/**
+ * vs_string_dup - Allocate a C string buffer and copy a Virtual Services string
+ *                 into it.
+ * @str: Virtual Services string to duplicate
+ */
+static inline char *
+vs_string_dup(struct vs_string *str, gfp_t gfp)
+{
+	size_t len;
+	char *ret;
+
+	len = strnlen(str->ptr, str->max_size) + 1;
+	ret = kmalloc(len, gfp);
+	if (ret)
+		vs_string_copyout(ret, str, len);
+	return ret;
+}
+
+/**
+ * vs_string_max_size - Return the maximum size of a Virtual Services string,
+ *                      not including the NUL terminator if the length of the
+ *                      string is equal to max_size.
+ *
+ * @str: Virtual Services string to return the maximum size of.
+ *
+ * Returns the maximum size of the string.
+ */
+static inline size_t
+vs_string_max_size(struct vs_string *str)
+{
+	return str->max_size;
+}
+
+/**
+ * struct vs_pbuf - Handle to a variable-size buffered payload.
+ * @data: Data buffer
+ * @size: Current size of the buffer
+ * @max_size: Maximum size of the buffer
+ *
+ * This is similar to struct vs_string, except that it has an explicitly
+ * stored size rather than being NUL-terminated. The functions that
+ * return ssize_t all return the new size of the modified buffer, and
+ * return a negative error code if the buffer would overflow.
+ */
+struct vs_pbuf {
+	void *data;
+	size_t size, max_size;
+};
+
+/**
+ * vs_pbuf_size - Get the size of a pbuf
+ * @pbuf: pbuf to get the size of
+ */
+static inline size_t vs_pbuf_size(const struct vs_pbuf *pbuf)
+{
+	return pbuf->size;
+}
+
+/**
+ * vs_pbuf_data - Get the data pointer for a pbuf
+ * @pbuf: pbuf to get the data pointer for
+ */
+static inline const void *vs_pbuf_data(const struct vs_pbuf *pbuf)
+{
+	return pbuf->data;
+}
+
+/**
+ * vs_pbuf_resize - Resize a pbuf
+ * @pbuf: pbuf to resize
+ * @size: New size
+ */
+static inline ssize_t vs_pbuf_resize(struct vs_pbuf *pbuf, size_t size)
+{
+	if (size > pbuf->max_size)
+		return -EOVERFLOW;
+
+	pbuf->size = size;
+	return size;
+}
+
+/**
+ * vs_pbuf_copyin - Copy data into a pbuf
+ * @pbuf: pbuf to copy data into
+ * @offset: Offset to copy data to
+ * @data: Pointer to data to copy into the pbuf
+ * @nbytes: Number of bytes to copy into the pbuf
+ */
+static inline ssize_t vs_pbuf_copyin(struct vs_pbuf *pbuf, off_t offset,
+		const void *data, size_t nbytes)
+{
+	if (offset + nbytes > pbuf->size)
+		return -EOVERFLOW;
+
+	memcpy(pbuf->data + offset, data, nbytes);
+
+	return nbytes;
+}
+
+/**
+ * vs_pbuf_append - Append data to a pbuf
+ * @pbuf: pbuf to append to
+ * @data: Pointer to data to append to the pbuf
+ * @nbytes: Number of bytes to append
+ */
+static inline ssize_t vs_pbuf_append(struct vs_pbuf *pbuf,
+		const void *data, size_t nbytes)
+{
+	if (pbuf->size + nbytes > pbuf->max_size)
+		return -EOVERFLOW;
+
+	memcpy(pbuf->data + pbuf->size, data, nbytes);
+	pbuf->size += nbytes;
+
+	return pbuf->size;
+}
+
+/**
+ * vs_pbuf_dup_string - Duplicate the contents of a pbuf as a C string. The
+ * string is allocated and must be freed using kfree.
+ * @pbuf: pbuf to convert
+ * @gfp_flags: GFP flags for the string allocation
+ */
+static inline char *vs_pbuf_dup_string(struct vs_pbuf *pbuf, gfp_t gfp_flags)
+{
+	return kstrndup(pbuf->data, pbuf->size, gfp_flags);
+}
+
+#endif /* _VSERVICES_BUFFER_H_ */
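
A rough sketch of how protocol code is expected to use these wrappers, assuming
the vs_string and vs_pbuf handles have already been set up to point into a
received message buffer:

	/* Sketch only: consume a string field and extend a payload field. */
	static int example_consume(struct vs_string *name, struct vs_pbuf *payload)
	{
		char buf[32];
		size_t len;

		/* Copy out the possibly non-NUL-terminated string safely. */
		len = vs_string_copyout(buf, name, sizeof(buf));
		if (len >= sizeof(buf))
			pr_debug("name was truncated\n");

		/* Append a trailing byte to the variable-sized payload. */
		if (vs_pbuf_append(payload, "", 1) < 0)
			return -EOVERFLOW;

		return 0;
	}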
diff --git a/include/vservices/ioctl.h b/include/vservices/ioctl.h
new file mode 100644
index 0000000..d96fcab
--- /dev/null
+++ b/include/vservices/ioctl.h
@@ -0,0 +1,48 @@
+/*
+ * vservices/ioctl.h - Interface to service character devices
+ *
+ * Copyright (c) 2016, Cog Systems Pty Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_PUBLIC_VSERVICES_IOCTL_H__
+#define __LINUX_PUBLIC_VSERVICES_IOCTL_H__
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+
+/* ioctls that work on any opened service device */
+#define IOCTL_VS_RESET_SERVICE		_IO('4', 0)
+#define IOCTL_VS_GET_NAME		_IOR('4', 1, char[16])
+#define IOCTL_VS_GET_PROTOCOL		_IOR('4', 2, char[32])
+
+/*
+ * Claim a device for user I/O (if no kernel driver is attached). The claim
+ * persists until the char device is closed.
+ */
+struct vs_ioctl_bind {
+	__u32 send_quota;
+	__u32 recv_quota;
+	__u32 send_notify_bits;
+	__u32 recv_notify_bits;
+	size_t msg_size;
+};
+#define IOCTL_VS_BIND_CLIENT _IOR('4', 3, struct vs_ioctl_bind)
+#define IOCTL_VS_BIND_SERVER _IOWR('4', 4, struct vs_ioctl_bind)
+
+/* send and receive messages and notifications */
+#define IOCTL_VS_NOTIFY _IOW('4', 5, __u32)
+struct vs_ioctl_iovec {
+	union {
+		__u32 iovcnt; /* input */
+		__u32 notify_bits; /* output (recv only) */
+	};
+	struct iovec *iov;
+};
+#define IOCTL_VS_SEND _IOW('4', 6, struct vs_ioctl_iovec)
+#define IOCTL_VS_RECV _IOWR('4', 7, struct vs_ioctl_iovec)
+
+#endif /* __LINUX_PUBLIC_VSERVICES_IOCTL_H__ */
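
From user space, a claimed service is driven by these ioctls on its character
device. A minimal sketch, assuming the header is exported as <vservices/ioctl.h>
and the device node path is provided by the platform (it is not defined here):

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/uio.h>
	#include <vservices/ioctl.h>

	static int send_hello(const char *devnode)
	{
		struct vs_ioctl_bind bind;
		char msg[] = "hello";
		struct iovec iov = { .iov_base = msg, .iov_len = sizeof(msg) };
		struct vs_ioctl_iovec io = { .iovcnt = 1, .iov = &iov };
		int fd = open(devnode, O_RDWR);

		if (fd < 0)
			return -1;

		/* Claim the service as a client and learn its message geometry. */
		if (ioctl(fd, IOCTL_VS_BIND_CLIENT, &bind) < 0 ||
		    iov.iov_len > bind.msg_size ||
		    ioctl(fd, IOCTL_VS_SEND, &io) < 0) {
			close(fd);
			return -1;
		}

		close(fd);
		return 0;
	}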
diff --git a/include/vservices/protocol/Kbuild b/include/vservices/protocol/Kbuild
new file mode 100644
index 0000000..374d9b6
--- /dev/null
+++ b/include/vservices/protocol/Kbuild
@@ -0,0 +1,12 @@
+#
+# Find all of the protocol directory names, and get the basename followed
+# by a trailing slash.
+#
+protocols=$(shell find include/vservices/protocol/ -mindepth 1 -type d -exec basename {} \;)
+protocol_dirs=$(foreach p, $(protocols), $(p)/)
+
+#
+# Export the headers for all protocols. The kbuild file in each protocol
+# directory specifies exactly which headers to export.
+#
+header-y += $(protocol_dirs)
diff --git a/include/vservices/protocol/core.h b/include/vservices/protocol/core.h
new file mode 100644
index 0000000..3a86af5
--- /dev/null
+++ b/include/vservices/protocol/core.h
@@ -0,0 +1,145 @@
+/*
+ * include/vservices/protocol/core.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * These are the common generated definitions for the core protocol drivers;
+ * specifically the message IDs and the protocol state representation.
+ *
+ * This is currently hand-generated, but will eventually be autogenerated
+ * from the protocol specifications in core.vs. Please keep it consistent
+ * with that file.
+ */
+
+#define VSERVICE_CORE_PROTOCOL_NAME "com.ok-labs.core"
+#define VSERVICE_CORE_PARAM_SIZE_SERVICE_INFO__PROTOCOL_NAME 32
+#define VSERVICE_CORE_PARAM_SIZE_SERVICE_INFO__SERVICE_NAME 16
+
+/*
+ * Identifiers for in-band messages.
+ *
+ * This definition applies in both directions, because there is no practical
+ * limit on message IDs (services are unlikely to define 2^16 distinct message
+ * names).
+ */
+typedef enum {
+	/** simple_protocol core **/
+	/* message out startup */
+	VSERVICE_CORE_MSG_STARTUP,
+
+	/* message out shutdown */
+	VSERVICE_CORE_MSG_SHUTDOWN,
+
+	/* command in sync connect */
+	VSERVICE_CORE_REQ_CONNECT,
+	VSERVICE_CORE_ACK_CONNECT,
+	VSERVICE_CORE_NACK_CONNECT,
+
+	/* command in sync disconnect */
+	VSERVICE_CORE_REQ_DISCONNECT,
+	VSERVICE_CORE_ACK_DISCONNECT,
+	VSERVICE_CORE_NACK_DISCONNECT,
+
+	/* command in service_count */
+	VSERVICE_CORE_REQ_SERVICE_COUNT,
+	VSERVICE_CORE_ACK_SERVICE_COUNT,
+	VSERVICE_CORE_NACK_SERVICE_COUNT,
+
+	/* command in queued service_info */
+	VSERVICE_CORE_REQ_SERVICE_INFO,
+	VSERVICE_CORE_ACK_SERVICE_INFO,
+	VSERVICE_CORE_NACK_SERVICE_INFO,
+
+	/* message inout service_reset */
+	VSERVICE_CORE_MSG_SERVICE_RESET,
+
+	/* message inout service_ready */
+	VSERVICE_CORE_MSG_SERVICE_READY,
+
+	/* message out notification bits */
+	VSERVICE_CORE_MSG_NOTIFICATION_BITS_INFO,
+
+} vservice_core_message_id_t;
+
+/*
+ * Notification bits are defined separately for each direction because there
+ * is relatively limited space to allocate them from (specifically, the bits in
+ * a machine word). It is unlikely but possible for a protocol to reach this
+ * limit.
+ */
+
+/* Bits in the in (client -> server) notification bitmask. */
+typedef enum {
+	/** simple_protocol core **/
+	/* No in notifications */
+
+	VSERVICE_CORE_NBIT_IN__COUNT = 0,
+} vservice_core_nbit_in_t;
+
+/* Masks for the in notification bits */
+/* No in notifications */
+
+/* Bits in the out (server -> client) notification bitmask. */
+typedef enum {
+	/** simple_protocol core **/
+	/* notification out reenumerate */
+	VSERVICE_CORE_NBIT_OUT_REENUMERATE = 0,
+
+	VSERVICE_CORE_NBIT_OUT__COUNT,
+} vservice_core_nbit_out_t;
+
+/* Masks for the out notification bits */
+#define VSERVICE_CORE_NMASK_OUT_REENUMERATE \
+		(1 << VSERVICE_CORE_NBIT_OUT_REENUMERATE)
+
+/* Valid states of the interface's generated state machine. */
+typedef enum {
+	/* state offline */
+	VSERVICE_CORE_STATE_OFFLINE = 0,
+
+	/* state disconnected */
+	VSERVICE_CORE_STATE_DISCONNECTED,
+	VSERVICE_CORE_STATE_DISCONNECTED__CONNECT,
+
+	/* state connected */
+	VSERVICE_CORE_STATE_CONNECTED,
+	VSERVICE_CORE_STATE_CONNECTED__DISCONNECT,
+
+	/* reset offline */
+	VSERVICE_CORE_STATE__RESET = VSERVICE_CORE_STATE_OFFLINE,
+} vservice_core_statenum_t;
+
+typedef struct {
+	vservice_core_statenum_t statenum;
+	bool pending_service_count;
+	unsigned pending_service_info;
+} vservice_core_state_t;
+
+#define VSERVICE_CORE_RESET_STATE (vservice_core_state_t) { \
+	.statenum = VSERVICE_CORE_STATE__RESET, \
+	.pending_service_count = false, \
+	.pending_service_info = 0 }
+
+#define VSERVICE_CORE_STATE_IS_OFFLINE(state) ( \
+	((state).statenum == VSERVICE_CORE_STATE_OFFLINE))
+#define VSERVICE_CORE_STATE_IS_DISCONNECTED(state) ( \
+	((state).statenum == VSERVICE_CORE_STATE_DISCONNECTED) || \
+	((state).statenum == VSERVICE_CORE_STATE_DISCONNECTED__CONNECT))
+#define VSERVICE_CORE_STATE_IS_CONNECTED(state) ( \
+	((state).statenum == VSERVICE_CORE_STATE_CONNECTED) || \
+	((state).statenum == VSERVICE_CORE_STATE_CONNECTED__DISCONNECT))
+
+#define VSERVICE_CORE_STATE_VALID(state) ( \
+	VSERVICE_CORE_STATE_IS_OFFLINE(state) ? ( \
+		((state).pending_service_count == false) && \
+		((state).pending_service_info == 0)) : \
+	VSERVICE_CORE_STATE_IS_DISCONNECTED(state) ? ( \
+		((state).pending_service_count == false) && \
+		((state).pending_service_info == 0)) : \
+	VSERVICE_CORE_STATE_IS_CONNECTED(state) ? true : \
+	false)
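
The reset and validity macros above are meant to be used whenever a driver
stores or checks the core protocol state directly. A small sketch using only
the definitions from this file:

	/* Sketch: reset the hand-generated core state and inspect it. */
	static bool example_core_state_ok(void)
	{
		vservice_core_state_t st = VSERVICE_CORE_RESET_STATE;

		/* The reset state is the offline state with nothing pending. */
		if (!VSERVICE_CORE_STATE_VALID(st) ||
		    !VSERVICE_CORE_STATE_IS_OFFLINE(st))
			return false;

		st.statenum = VSERVICE_CORE_STATE_CONNECTED;
		return VSERVICE_CORE_STATE_IS_CONNECTED(st);
	}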
diff --git a/include/vservices/protocol/core/Kbuild b/include/vservices/protocol/core/Kbuild
new file mode 100644
index 0000000..ec3cbe8
--- /dev/null
+++ b/include/vservices/protocol/core/Kbuild
@@ -0,0 +1 @@
+header-y += types.h
diff --git a/include/vservices/protocol/core/client.h b/include/vservices/protocol/core/client.h
new file mode 100644
index 0000000..3d52999
--- /dev/null
+++ b/include/vservices/protocol/core/client.h
@@ -0,0 +1,155 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(__VSERVICES_CLIENT_CORE__)
+#define __VSERVICES_CLIENT_CORE__
+
+struct vs_service_device;
+struct vs_client_core_state;
+
+struct vs_client_core {
+
+	/*
+	 * If set to false then the receive message handlers are run from
+	 * workqueue context and are allowed to sleep. If set to true the
+	 * message handlers are run from tasklet context and may not sleep.
+	 */
+	bool rx_atomic;
+
+	/*
+	 * If this is set to true along with rx_atomic, the driver is allowed
+	 * to send messages from softirq contexts other than the receive
+	 * message handlers, after calling vs_service_state_lock_bh. Otherwise,
+	 * messages may only be sent from the receive message handlers, or
+	 * from task context after calling vs_service_state_lock. This must
+	 * not be set to true if rx_atomic is set to false.
+	 */
+	bool tx_atomic;
+    /** session setup **/
+	struct vs_client_core_state *(*alloc) (struct vs_service_device *
+					       service);
+	void (*release) (struct vs_client_core_state * _state);
+
+	struct vs_service_driver *driver;
+
+	/** Core service base interface **/
+	void (*start) (struct vs_client_core_state * _state);
+	void (*reset) (struct vs_client_core_state * _state);
+    /** Send/receive state callbacks **/
+	int (*tx_ready) (struct vs_client_core_state * _state);
+
+	struct {
+		int (*state_change) (struct vs_client_core_state * _state,
+				     vservice_core_statenum_t old,
+				     vservice_core_statenum_t new);
+
+		int (*ack_connect) (struct vs_client_core_state * _state);
+		int (*nack_connect) (struct vs_client_core_state * _state);
+
+		int (*ack_disconnect) (struct vs_client_core_state * _state);
+		int (*nack_disconnect) (struct vs_client_core_state * _state);
+
+		int (*msg_startup) (struct vs_client_core_state * _state,
+				    uint32_t core_in_quota,
+				    uint32_t core_out_quota);
+
+		int (*msg_shutdown) (struct vs_client_core_state * _state);
+
+		int (*msg_service_created) (struct vs_client_core_state *
+					    _state, uint32_t service_id,
+					    struct vs_string service_name,
+					    struct vs_string protocol_name,
+					    struct vs_mbuf * _mbuf);
+
+		int (*msg_service_removed) (struct vs_client_core_state *
+					    _state, uint32_t service_id);
+
+		int (*msg_server_ready) (struct vs_client_core_state * _state,
+					 uint32_t service_id, uint32_t in_quota,
+					 uint32_t out_quota,
+					 uint32_t in_bit_offset,
+					 uint32_t in_num_bits,
+					 uint32_t out_bit_offset,
+					 uint32_t out_num_bits);
+
+		int (*msg_service_reset) (struct vs_client_core_state * _state,
+					  uint32_t service_id);
+
+	} core;
+};
+
+struct vs_client_core_state {
+	vservice_core_protocol_state_t state;
+	struct vs_service_device *service;
+	bool released;
+};
+
+extern int vs_client_core_reopen(struct vs_client_core_state *_state);
+
+extern int vs_client_core_close(struct vs_client_core_state *_state);
+
+    /** interface core **/
+/* command sync connect */
+extern int vs_client_core_core_req_connect(struct vs_client_core_state *_state,
+					   gfp_t flags);
+
+	/* command sync disconnect */
+extern int vs_client_core_core_req_disconnect(struct vs_client_core_state
+					      *_state, gfp_t flags);
+
+	/* message startup */
+/* message shutdown */
+/* message service_created */
+extern int vs_client_core_core_getbufs_service_created(struct
+						       vs_client_core_state
+						       *_state,
+						       struct vs_string
+						       *service_name,
+						       struct vs_string
+						       *protocol_name,
+						       struct vs_mbuf *_mbuf);
+extern int vs_client_core_core_free_service_created(struct vs_client_core_state
+						    *_state,
+						    struct vs_string
+						    *service_name,
+						    struct vs_string
+						    *protocol_name,
+						    struct vs_mbuf *_mbuf);
+    /* message service_removed */
+/* message server_ready */
+/* message service_reset */
+extern int vs_client_core_core_send_service_reset(struct vs_client_core_state
+						  *_state, uint32_t service_id,
+						  gfp_t flags);
+
+/** Module registration **/
+
+struct module;
+
+extern int __vservice_core_client_register(struct vs_client_core *client,
+					   const char *name,
+					   struct module *owner);
+
+static inline int vservice_core_client_register(struct vs_client_core *client,
+						const char *name)
+{
+#ifdef MODULE
+	extern struct module __this_module;
+	struct module *this_module = &__this_module;
+#else
+	struct module *this_module = NULL;
+#endif
+
+	return __vservice_core_client_register(client, name, this_module);
+}
+
+extern int vservice_core_client_unregister(struct vs_client_core *client);
+
+#endif				/* ! __VSERVICES_CLIENT_CORE__ */
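
A client driver fills in struct vs_client_core and registers it with the helper
above. A skeletal sketch with stub callbacks; the header paths and the use of
kzalloc/kfree for the state object are assumptions, not requirements of this API:

	#include <linux/module.h>
	#include <linux/slab.h>
	#include <vservices/buffer.h>
	#include <vservices/protocol/core/types.h>
	#include <vservices/protocol/core/client.h>

	static struct vs_client_core_state *
	example_core_alloc(struct vs_service_device *service)
	{
		return kzalloc(sizeof(struct vs_client_core_state), GFP_KERNEL);
	}

	static void example_core_release(struct vs_client_core_state *state)
	{
		kfree(state);
	}

	static struct vs_client_core example_core_client = {
		.rx_atomic = false,	/* message handlers may sleep */
		.alloc     = example_core_alloc,
		.release   = example_core_release,
	};

	static int __init example_core_init(void)
	{
		return vservice_core_client_register(&example_core_client,
						     "example_core_client");
	}
	module_init(example_core_init);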
diff --git a/include/vservices/protocol/core/common.h b/include/vservices/protocol/core/common.h
new file mode 100644
index 0000000..b496416
--- /dev/null
+++ b/include/vservices/protocol/core/common.h
@@ -0,0 +1,38 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(__VSERVICES_CORE_PROTOCOL_H__)
+#define __VSERVICES_CORE_PROTOCOL_H__
+
+#define VSERVICE_CORE_PROTOCOL_NAME "com.ok-labs.core"
+typedef enum {
+	VSERVICE_CORE_CORE_REQ_CONNECT,
+	VSERVICE_CORE_CORE_ACK_CONNECT,
+	VSERVICE_CORE_CORE_NACK_CONNECT,
+	VSERVICE_CORE_CORE_REQ_DISCONNECT,
+	VSERVICE_CORE_CORE_ACK_DISCONNECT,
+	VSERVICE_CORE_CORE_NACK_DISCONNECT,
+	VSERVICE_CORE_CORE_MSG_STARTUP,
+	VSERVICE_CORE_CORE_MSG_SHUTDOWN,
+	VSERVICE_CORE_CORE_MSG_SERVICE_CREATED,
+	VSERVICE_CORE_CORE_MSG_SERVICE_REMOVED,
+	VSERVICE_CORE_CORE_MSG_SERVER_READY,
+	VSERVICE_CORE_CORE_MSG_SERVICE_RESET,
+} vservice_core_message_id_t;
+typedef enum {
+	VSERVICE_CORE_NBIT_IN__COUNT
+} vservice_core_nbit_in_t;
+
+typedef enum {
+	VSERVICE_CORE_NBIT_OUT__COUNT
+} vservice_core_nbit_out_t;
+
+/* Notification mask macros */
+#endif				/* ! __VSERVICES_CORE_PROTOCOL_H__ */
diff --git a/include/vservices/protocol/core/server.h b/include/vservices/protocol/core/server.h
new file mode 100644
index 0000000..959b8c3
--- /dev/null
+++ b/include/vservices/protocol/core/server.h
@@ -0,0 +1,171 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(VSERVICES_SERVER_CORE)
+#define VSERVICES_SERVER_CORE
+
+struct vs_service_device;
+struct vs_server_core_state;
+
+struct vs_server_core {
+
+	/*
+	 * If set to false then the receive message handlers are run from
+	 * workqueue context and are allowed to sleep. If set to true the
+	 * message handlers are run from tasklet context and may not sleep.
+	 */
+	bool rx_atomic;
+
+	/*
+	 * If this is set to true along with rx_atomic, the driver is allowed
+	 * to send messages from softirq contexts other than the receive
+	 * message handlers, after calling vs_service_state_lock_bh. Otherwise,
+	 * messages may only be sent from the receive message handlers, or
+	 * from task context after calling vs_service_state_lock. This must
+	 * not be set to true if rx_atomic is set to false.
+	 */
+	bool tx_atomic;
+
+	/*
+	 * These are the driver's recommended message quotas. They are used
+	 * by the core service to select message quotas for services with no
+	 * explicitly configured quotas.
+	 */
+	u32 in_quota_best;
+	u32 out_quota_best;
+    /** session setup **/
+	struct vs_server_core_state *(*alloc) (struct vs_service_device *
+					       service);
+	void (*release) (struct vs_server_core_state * _state);
+
+	struct vs_service_driver *driver;
+
+	/** Core service base interface **/
+	void (*start) (struct vs_server_core_state * _state);
+	void (*reset) (struct vs_server_core_state * _state);
+    /** Send/receive state callbacks **/
+	int (*tx_ready) (struct vs_server_core_state * _state);
+
+	struct {
+		int (*state_change) (struct vs_server_core_state * _state,
+				     vservice_core_statenum_t old,
+				     vservice_core_statenum_t new);
+
+		int (*req_connect) (struct vs_server_core_state * _state);
+
+		int (*req_disconnect) (struct vs_server_core_state * _state);
+
+		int (*msg_service_reset) (struct vs_server_core_state * _state,
+					  uint32_t service_id);
+
+	} core;
+};
+
+struct vs_server_core_state {
+	vservice_core_protocol_state_t state;
+	struct vs_service_device *service;
+	bool released;
+};
+
+/** Complete calls for server core functions **/
+
+    /** interface core **/
+/* command sync connect */
+extern int vs_server_core_core_send_ack_connect(struct vs_server_core_state
+						*_state, gfp_t flags);
+extern int vs_server_core_core_send_nack_connect(struct vs_server_core_state
+						 *_state, gfp_t flags);
+    /* command sync disconnect */
+extern int vs_server_core_core_send_ack_disconnect(struct vs_server_core_state
+						   *_state, gfp_t flags);
+extern int vs_server_core_core_send_nack_disconnect(struct vs_server_core_state
+						    *_state, gfp_t flags);
+    /* message startup */
+extern int vs_server_core_core_send_startup(struct vs_server_core_state *_state,
+					    uint32_t core_in_quota,
+					    uint32_t core_out_quota,
+					    gfp_t flags);
+
+	    /* message shutdown */
+extern int vs_server_core_core_send_shutdown(struct vs_server_core_state
+					     *_state, gfp_t flags);
+
+	    /* message service_created */
+extern struct vs_mbuf *vs_server_core_core_alloc_service_created(struct
+								 vs_server_core_state
+								 *_state,
+								 struct
+								 vs_string
+								 *service_name,
+								 struct
+								 vs_string
+								 *protocol_name,
+								 gfp_t flags);
+extern int vs_server_core_core_free_service_created(struct vs_server_core_state
+						    *_state,
+						    struct vs_string
+						    *service_name,
+						    struct vs_string
+						    *protocol_name,
+						    struct vs_mbuf *_mbuf);
+extern int vs_server_core_core_send_service_created(struct vs_server_core_state
+						    *_state,
+						    uint32_t service_id,
+						    struct vs_string
+						    service_name,
+						    struct vs_string
+						    protocol_name,
+						    struct vs_mbuf *_mbuf);
+
+	    /* message service_removed */
+extern int vs_server_core_core_send_service_removed(struct vs_server_core_state
+						    *_state,
+						    uint32_t service_id,
+						    gfp_t flags);
+
+	    /* message server_ready */
+extern int vs_server_core_core_send_server_ready(struct vs_server_core_state
+						 *_state, uint32_t service_id,
+						 uint32_t in_quota,
+						 uint32_t out_quota,
+						 uint32_t in_bit_offset,
+						 uint32_t in_num_bits,
+						 uint32_t out_bit_offset,
+						 uint32_t out_num_bits,
+						 gfp_t flags);
+
+	    /* message service_reset */
+extern int vs_server_core_core_send_service_reset(struct vs_server_core_state
+						  *_state, uint32_t service_id,
+						  gfp_t flags);
+
+/** Module registration **/
+
+struct module;
+
+extern int __vservice_core_server_register(struct vs_server_core *server,
+					   const char *name,
+					   struct module *owner);
+
+static inline int vservice_core_server_register(struct vs_server_core *server,
+						const char *name)
+{
+#ifdef MODULE
+	extern struct module __this_module;
+	struct module *this_module = &__this_module;
+#else
+	struct module *this_module = NULL;
+#endif
+
+	return __vservice_core_server_register(server, name, this_module);
+}
+
+extern int vservice_core_server_unregister(struct vs_server_core *server);
+#endif				/* ! VSERVICES_SERVER_CORE */
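
On the server side, announcing a new service pairs the alloc/send calls above
with the vs_string helpers from vservices/buffer.h. A rough sketch; the error
convention of the alloc call and the caller's locking context are assumptions:

	/* Sketch: advertise a newly created service to the client. */
	static int example_announce_service(struct vs_server_core_state *state,
					    uint32_t service_id)
	{
		struct vs_string service_name, protocol_name;
		struct vs_mbuf *mbuf;

		mbuf = vs_server_core_core_alloc_service_created(state,
								 &service_name,
								 &protocol_name,
								 GFP_KERNEL);
		if (IS_ERR_OR_NULL(mbuf))
			return mbuf ? PTR_ERR(mbuf) : -ENOMEM;

		/* Fill the fixed-size string fields inside the message buffer. */
		vs_string_copyin(&service_name, "example");
		vs_string_copyin(&protocol_name, "com.ok-labs.serial");

		return vs_server_core_core_send_service_created(state, service_id,
								service_name,
								protocol_name,
								mbuf);
	}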
diff --git a/include/vservices/protocol/core/types.h b/include/vservices/protocol/core/types.h
new file mode 100644
index 0000000..2d6928d
--- /dev/null
+++ b/include/vservices/protocol/core/types.h
@@ -0,0 +1,87 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(VSERVICES_CORE_TYPES_H)
+#define VSERVICES_CORE_TYPES_H
+
+#define VSERVICE_CORE_SERVICE_NAME_SIZE (uint32_t)16
+
+#define VSERVICE_CORE_PROTOCOL_NAME_SIZE (uint32_t)32
+
+typedef enum {
+/* state offline */
+	VSERVICE_CORE_STATE_OFFLINE = 0,
+	VSERVICE_CORE_STATE_OFFLINE__CONNECT,
+	VSERVICE_CORE_STATE_OFFLINE__DISCONNECT,
+
+/* state disconnected */
+	VSERVICE_CORE_STATE_DISCONNECTED,
+	VSERVICE_CORE_STATE_DISCONNECTED__CONNECT,
+	VSERVICE_CORE_STATE_DISCONNECTED__DISCONNECT,
+
+/* state connected */
+	VSERVICE_CORE_STATE_CONNECTED,
+	VSERVICE_CORE_STATE_CONNECTED__CONNECT,
+	VSERVICE_CORE_STATE_CONNECTED__DISCONNECT,
+
+	VSERVICE_CORE__RESET = VSERVICE_CORE_STATE_OFFLINE
+} vservice_core_statenum_t;
+
+typedef struct {
+	vservice_core_statenum_t statenum;
+} vservice_core_state_t;
+
+#define VSERVICE_CORE_RESET_STATE (vservice_core_state_t) { \
+.statenum = VSERVICE_CORE__RESET}
+
+#define VSERVICE_CORE_STATE_IS_OFFLINE(state) (\
+((state).statenum == VSERVICE_CORE_STATE_OFFLINE) || \
+((state).statenum == VSERVICE_CORE_STATE_OFFLINE__CONNECT) || \
+((state).statenum == VSERVICE_CORE_STATE_OFFLINE__DISCONNECT))
+
+#define VSERVICE_CORE_STATE_IS_DISCONNECTED(state) (\
+((state).statenum == VSERVICE_CORE_STATE_DISCONNECTED) || \
+((state).statenum == VSERVICE_CORE_STATE_DISCONNECTED__CONNECT) || \
+((state).statenum == VSERVICE_CORE_STATE_DISCONNECTED__DISCONNECT))
+
+#define VSERVICE_CORE_STATE_IS_CONNECTED(state) (\
+((state).statenum == VSERVICE_CORE_STATE_CONNECTED) || \
+((state).statenum == VSERVICE_CORE_STATE_CONNECTED__CONNECT) || \
+((state).statenum == VSERVICE_CORE_STATE_CONNECTED__DISCONNECT))
+
+#define VSERVICE_CORE_STATE_VALID(state) ( \
+VSERVICE_CORE_STATE_IS_OFFLINE(state) ? true : \
+VSERVICE_CORE_STATE_IS_DISCONNECTED(state) ? true : \
+VSERVICE_CORE_STATE_IS_CONNECTED(state) ? true : \
+false)
+
+static inline const char *vservice_core_get_state_string(vservice_core_state_t
+							 state)
+{
+	static const char *names[] =
+	    { "offline", "offline__connect", "offline__disconnect",
+		"disconnected", "disconnected__connect",
+		    "disconnected__disconnect",
+		"connected", "connected__connect", "connected__disconnect"
+	};
+	if (!VSERVICE_CORE_STATE_VALID(state)) {
+		return "INVALID";
+	}
+	return names[state.statenum];
+}
+
+typedef struct {
+
+	vservice_core_state_t core;
+} vservice_core_protocol_state_t;
+
+#define VSERVICE_CORE_PROTOCOL_RESET_STATE (vservice_core_protocol_state_t) {\
+.core = VSERVICE_CORE_RESET_STATE }
+#endif				/* ! VSERVICES_CORE_TYPES_H */
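
The state-name helper above is mainly useful for diagnostics; a brief sketch:

	/* Sketch: log the human-readable name of the current core state. */
	static void example_log_core_state(const vservice_core_protocol_state_t *ps)
	{
		pr_info("core protocol state: %s\n",
			vservice_core_get_state_string(ps->core));
	}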
diff --git a/include/vservices/protocol/serial/Kbuild b/include/vservices/protocol/serial/Kbuild
new file mode 100644
index 0000000..ec3cbe8
--- /dev/null
+++ b/include/vservices/protocol/serial/Kbuild
@@ -0,0 +1 @@
+header-y += types.h
diff --git a/include/vservices/protocol/serial/client.h b/include/vservices/protocol/serial/client.h
new file mode 100644
index 0000000..78efed2e
--- /dev/null
+++ b/include/vservices/protocol/serial/client.h
@@ -0,0 +1,114 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(__VSERVICES_CLIENT_SERIAL__)
+#define __VSERVICES_CLIENT_SERIAL__
+
+struct vs_service_device;
+struct vs_client_serial_state;
+
+struct vs_client_serial {
+
+	/*
+	 * If set to false then the receive message handlers are run from
+	 * workqueue context and are allowed to sleep. If set to true the
+	 * message handlers are run from tasklet context and may not sleep.
+	 */
+	bool rx_atomic;
+
+	/*
+	 * If this is set to true along with rx_atomic, the driver is allowed
+	 * to send messages from softirq contexts other than the receive
+	 * message handlers, after calling vs_service_state_lock_bh. Otherwise,
+	 * messages may only be sent from the receive message handlers, or
+	 * from task context after calling vs_service_state_lock. This must
+	 * not be set to true if rx_atomic is set to false.
+	 */
+	bool tx_atomic;
+    /** session setup **/
+	struct vs_client_serial_state *(*alloc) (struct vs_service_device *
+						 service);
+	void (*release) (struct vs_client_serial_state * _state);
+
+	struct vs_service_driver *driver;
+
+/** Opened, reopened and closed functions **/
+
+	void (*opened) (struct vs_client_serial_state * _state);
+
+	void (*reopened) (struct vs_client_serial_state * _state);
+
+	void (*closed) (struct vs_client_serial_state * _state);
+
+/** Send/receive state callbacks **/
+	int (*tx_ready) (struct vs_client_serial_state * _state);
+
+	struct {
+		int (*msg_msg) (struct vs_client_serial_state * _state,
+				struct vs_pbuf b, struct vs_mbuf * _mbuf);
+
+	} serial;
+};
+
+struct vs_client_serial_state {
+	vservice_serial_protocol_state_t state;
+	uint32_t packet_size;
+	struct {
+		uint32_t packet_size;
+	} serial;
+	struct vs_service_device *service;
+	bool released;
+};
+
+extern int vs_client_serial_reopen(struct vs_client_serial_state *_state);
+
+extern int vs_client_serial_close(struct vs_client_serial_state *_state);
+
+    /** interface serial **/
+/* message msg */
+extern struct vs_mbuf *vs_client_serial_serial_alloc_msg(struct
+							 vs_client_serial_state
+							 *_state,
+							 struct vs_pbuf *b,
+							 gfp_t flags);
+extern int vs_client_serial_serial_getbufs_msg(struct vs_client_serial_state
+					       *_state, struct vs_pbuf *b,
+					       struct vs_mbuf *_mbuf);
+extern int vs_client_serial_serial_free_msg(struct vs_client_serial_state
+					    *_state, struct vs_pbuf *b,
+					    struct vs_mbuf *_mbuf);
+extern int vs_client_serial_serial_send_msg(struct vs_client_serial_state
+					    *_state, struct vs_pbuf b,
+					    struct vs_mbuf *_mbuf);
+
+/** Module registration **/
+
+struct module;
+
+extern int __vservice_serial_client_register(struct vs_client_serial *client,
+					     const char *name,
+					     struct module *owner);
+
+static inline int vservice_serial_client_register(struct vs_client_serial
+						  *client, const char *name)
+{
+#ifdef MODULE
+	extern struct module __this_module;
+	struct module *this_module = &__this_module;
+#else
+	struct module *this_module = NULL;
+#endif
+
+	return __vservice_serial_client_register(client, name, this_module);
+}
+
+extern int vservice_serial_client_unregister(struct vs_client_serial *client);
+
+#endif				/* ! __VSERVICES_CLIENT_SERIAL__ */
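
Transmitting on the serial protocol follows an alloc/fill/send pattern built on
the pbuf helpers from vservices/buffer.h. A rough sketch from a client's tx
path; the alloc error convention and the caller holding the service state lock
are assumptions:

	/* Sketch: send one packet from a serial client, sized to packet_size. */
	static int example_serial_send(struct vs_client_serial_state *state,
				       const void *data, size_t len)
	{
		struct vs_pbuf pbuf;
		struct vs_mbuf *mbuf;
		ssize_t ret;

		if (len > state->serial.packet_size)
			return -EMSGSIZE;

		mbuf = vs_client_serial_serial_alloc_msg(state, &pbuf, GFP_KERNEL);
		if (IS_ERR_OR_NULL(mbuf))
			return mbuf ? PTR_ERR(mbuf) : -ENOMEM;

		/* Size the payload, copy the data in, then hand the buffer off. */
		ret = vs_pbuf_resize(&pbuf, len);
		if (ret >= 0)
			ret = vs_pbuf_copyin(&pbuf, 0, data, len);
		if (ret < 0) {
			vs_client_serial_serial_free_msg(state, &pbuf, mbuf);
			return ret;
		}

		return vs_client_serial_serial_send_msg(state, pbuf, mbuf);
	}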
diff --git a/include/vservices/protocol/serial/common.h b/include/vservices/protocol/serial/common.h
new file mode 100644
index 0000000..a530645
--- /dev/null
+++ b/include/vservices/protocol/serial/common.h
@@ -0,0 +1,37 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(__VSERVICES_SERIAL_PROTOCOL_H__)
+#define __VSERVICES_SERIAL_PROTOCOL_H__
+
+#define VSERVICE_SERIAL_PROTOCOL_NAME "com.ok-labs.serial"
+typedef enum {
+	VSERVICE_SERIAL_BASE_REQ_OPEN,
+	VSERVICE_SERIAL_BASE_ACK_OPEN,
+	VSERVICE_SERIAL_BASE_NACK_OPEN,
+	VSERVICE_SERIAL_BASE_REQ_CLOSE,
+	VSERVICE_SERIAL_BASE_ACK_CLOSE,
+	VSERVICE_SERIAL_BASE_NACK_CLOSE,
+	VSERVICE_SERIAL_BASE_REQ_REOPEN,
+	VSERVICE_SERIAL_BASE_ACK_REOPEN,
+	VSERVICE_SERIAL_BASE_NACK_REOPEN,
+	VSERVICE_SERIAL_BASE_MSG_RESET,
+	VSERVICE_SERIAL_SERIAL_MSG_MSG,
+} vservice_serial_message_id_t;
+typedef enum {
+	VSERVICE_SERIAL_NBIT_IN__COUNT
+} vservice_serial_nbit_in_t;
+
+typedef enum {
+	VSERVICE_SERIAL_NBIT_OUT__COUNT
+} vservice_serial_nbit_out_t;
+
+/* Notification mask macros */
+#endif				/* ! __VSERVICES_SERIAL_PROTOCOL_H__ */
diff --git a/include/vservices/protocol/serial/server.h b/include/vservices/protocol/serial/server.h
new file mode 100644
index 0000000..001fed5
--- /dev/null
+++ b/include/vservices/protocol/serial/server.h
@@ -0,0 +1,134 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(VSERVICES_SERVER_SERIAL)
+#define VSERVICES_SERVER_SERIAL
+
+struct vs_service_device;
+struct vs_server_serial_state;
+
+struct vs_server_serial {
+
+	/*
+	 * If set to false then the receive message handlers are run from
+	 * workqueue context and are allowed to sleep. If set to true the
+	 * message handlers are run from tasklet context and may not sleep.
+	 */
+	bool rx_atomic;
+
+	/*
+	 * If this is set to true along with rx_atomic, the driver is allowed
+	 * to send messages from softirq contexts other than the receive
+	 * message handlers, after calling vs_service_state_lock_bh. Otherwise,
+	 * messages may only be sent from the receive message handlers, or
+	 * from task context after calling vs_service_state_lock. This must
+	 * not be set to true if rx_atomic is set to false.
+	 */
+	bool tx_atomic;
+
+	/*
+	 * These are the driver's recommended message quotas. They are used
+	 * by the core service to select message quotas for services with no
+	 * explicitly configured quotas.
+	 */
+	u32 in_quota_best;
+	u32 out_quota_best;
+    /** session setup **/
+	struct vs_server_serial_state *(*alloc) (struct vs_service_device *
+						 service);
+	void (*release) (struct vs_server_serial_state * _state);
+
+	struct vs_service_driver *driver;
+
+/** Open, reopen, close and closed functions **/
+
+	 vs_server_response_type_t(*open) (struct vs_server_serial_state *
+					   _state);
+
+	 vs_server_response_type_t(*reopen) (struct vs_server_serial_state *
+					     _state);
+
+	 vs_server_response_type_t(*close) (struct vs_server_serial_state *
+					    _state);
+
+	void (*closed) (struct vs_server_serial_state * _state);
+
+/** Send/receive state callbacks **/
+	int (*tx_ready) (struct vs_server_serial_state * _state);
+
+	struct {
+		int (*msg_msg) (struct vs_server_serial_state * _state,
+				struct vs_pbuf b, struct vs_mbuf * _mbuf);
+
+	} serial;
+};
+
+struct vs_server_serial_state {
+	vservice_serial_protocol_state_t state;
+	uint32_t packet_size;
+	struct {
+		uint32_t packet_size;
+	} serial;
+	struct vs_service_device *service;
+	bool released;
+};
+
+/** Complete calls for server core functions **/
+extern int vs_server_serial_open_complete(struct vs_server_serial_state *_state,
+					  vs_server_response_type_t resp);
+
+extern int vs_server_serial_close_complete(struct vs_server_serial_state
+					   *_state,
+					   vs_server_response_type_t resp);
+
+extern int vs_server_serial_reopen_complete(struct vs_server_serial_state
+					    *_state,
+					    vs_server_response_type_t resp);
+
+    /** interface serial **/
+/* message msg */
+extern struct vs_mbuf *vs_server_serial_serial_alloc_msg(struct
+							 vs_server_serial_state
+							 *_state,
+							 struct vs_pbuf *b,
+							 gfp_t flags);
+extern int vs_server_serial_serial_getbufs_msg(struct vs_server_serial_state
+					       *_state, struct vs_pbuf *b,
+					       struct vs_mbuf *_mbuf);
+extern int vs_server_serial_serial_free_msg(struct vs_server_serial_state
+					    *_state, struct vs_pbuf *b,
+					    struct vs_mbuf *_mbuf);
+extern int vs_server_serial_serial_send_msg(struct vs_server_serial_state
+					    *_state, struct vs_pbuf b,
+					    struct vs_mbuf *_mbuf);
+
+/** Module registration **/
+
+struct module;
+
+extern int __vservice_serial_server_register(struct vs_server_serial *server,
+					     const char *name,
+					     struct module *owner);
+
+static inline int vservice_serial_server_register(struct vs_server_serial
+						  *server, const char *name)
+{
+#ifdef MODULE
+	extern struct module __this_module;
+	struct module *this_module = &__this_module;
+#else
+	struct module *this_module = NULL;
+#endif
+
+	return __vservice_serial_server_register(server, name, this_module);
+}
+
+extern int vservice_serial_server_unregister(struct vs_server_serial *server);
+#endif				/* ! VSERVICES_SERVER_SERIAL */
diff --git a/include/vservices/protocol/serial/types.h b/include/vservices/protocol/serial/types.h
new file mode 100644
index 0000000..46edf95
--- /dev/null
+++ b/include/vservices/protocol/serial/types.h
@@ -0,0 +1,88 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(VSERVICES_SERIAL_TYPES_H)
+#define VSERVICES_SERIAL_TYPES_H
+
+typedef enum {
+/* state closed */
+	VSERVICE_BASE_STATE_CLOSED = 0,
+	VSERVICE_BASE_STATE_CLOSED__OPEN,
+	VSERVICE_BASE_STATE_CLOSED__CLOSE,
+	VSERVICE_BASE_STATE_CLOSED__REOPEN,
+
+/* state running */
+	VSERVICE_BASE_STATE_RUNNING,
+	VSERVICE_BASE_STATE_RUNNING__OPEN,
+	VSERVICE_BASE_STATE_RUNNING__CLOSE,
+	VSERVICE_BASE_STATE_RUNNING__REOPEN,
+
+	VSERVICE_BASE__RESET = VSERVICE_BASE_STATE_CLOSED
+} vservice_base_statenum_t;
+
+typedef struct {
+	vservice_base_statenum_t statenum;
+} vservice_base_state_t;
+
+#define VSERVICE_BASE_RESET_STATE (vservice_base_state_t) { \
+.statenum = VSERVICE_BASE__RESET}
+
+#define VSERVICE_BASE_STATE_IS_CLOSED(state) (\
+((state).statenum == VSERVICE_BASE_STATE_CLOSED) || \
+((state).statenum == VSERVICE_BASE_STATE_CLOSED__OPEN) || \
+((state).statenum == VSERVICE_BASE_STATE_CLOSED__CLOSE) || \
+((state).statenum == VSERVICE_BASE_STATE_CLOSED__REOPEN))
+
+#define VSERVICE_BASE_STATE_IS_RUNNING(state) (\
+((state).statenum == VSERVICE_BASE_STATE_RUNNING) || \
+((state).statenum == VSERVICE_BASE_STATE_RUNNING__OPEN) || \
+((state).statenum == VSERVICE_BASE_STATE_RUNNING__CLOSE) || \
+((state).statenum == VSERVICE_BASE_STATE_RUNNING__REOPEN))
+
+#define VSERVICE_BASE_STATE_VALID(state) ( \
+VSERVICE_BASE_STATE_IS_CLOSED(state) ? true : \
+VSERVICE_BASE_STATE_IS_RUNNING(state) ? true : \
+false)
+
+static inline const char *vservice_base_get_state_string(vservice_base_state_t
+							 state)
+{
+	static const char *names[] =
+	    { "closed", "closed__open", "closed__close", "closed__reopen",
+		"running", "running__open", "running__close", "running__reopen"
+	};
+	if (!VSERVICE_BASE_STATE_VALID(state)) {
+		return "INVALID";
+	}
+	return names[state.statenum];
+}
+
+typedef struct {
+} vservice_serial_state_t;
+
+#define VSERVICE_SERIAL_RESET_STATE (vservice_serial_state_t) { \
+}
+
+#define VSERVICE_SERIAL_STATE_VALID(state) true
+
+typedef struct {
+
+	vservice_base_state_t base;
+
+	vservice_serial_state_t serial;
+} vservice_serial_protocol_state_t;
+
+#define VSERVICE_SERIAL_PROTOCOL_RESET_STATE (vservice_serial_protocol_state_t) {\
+.base = VSERVICE_BASE_RESET_STATE,\
+.serial = VSERVICE_SERIAL_RESET_STATE }
+
+#define VSERVICE_SERIAL_IS_STATE_RESET(state) \
+            ((state).base.statenum == VSERVICE_BASE__RESET)
+#endif				/* ! VSERVICES_SERIAL_TYPES_H */
diff --git a/include/vservices/service.h b/include/vservices/service.h
new file mode 100644
index 0000000..45e047e
--- /dev/null
+++ b/include/vservices/service.h
@@ -0,0 +1,674 @@
+/*
+ * include/vservices/service.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file defines the driver and device types for vServices client and
+ * server drivers. These are generally defined by generated protocol-layer
+ * code. However, they can also be defined directly by applications that
+ * don't require protocol generation.
+ */
+
+#ifndef _VSERVICE_SERVICE_H_
+#define _VSERVICE_SERVICE_H_
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38)
+#include <asm/atomic.h>
+#else
+#include <linux/atomic.h>
+#endif
+
+#include <vservices/transport.h>
+#include <vservices/session.h>
+#include <vservices/types.h>
+
+struct vs_mbuf;
+
+/**
+ * struct vs_service_driver - Virtual service driver structure
+ * @protocol: Protocol name for this driver
+ * @is_server: True if this is a server driver, false if it is a client driver
+ * @rx_atomic: If set to false then the receive message handlers are run from
+ *	     workqueue context and are allowed to sleep. If set to true
+ *	     the message handlers are run from tasklet context and may not
+ *	     sleep. For this purpose, tx_ready is considered a receive
+ *	     message handler.
+ * @tx_atomic: If this is set to true along with rx_atomic, the driver is
+ *	allowed to send messages from softirq contexts other than the receive
+ *	message handlers, after calling vs_service_state_lock_bh. Otherwise,
+ *	messages may only be sent from the receive message handlers, or from
+ *	task context after calling vs_service_state_lock.
+ * @probe: Probe function for this service
+ * @remove: Remove function for this service
+ * --- Callbacks ---
+ * @receive: Message handler function for this service
+ * @notify: Incoming notification handler function for this service
+ * @start: Callback which is run when this service is started
+ * @reset: Callback which is run when this service is reset
+ * @tx_ready: Callback which is run when the service has dropped below its
+ *	    send quota
+ * --- Resource requirements (valid for server only) ---
+ * @in_quota_min: minimum number of input messages for protocol functionality
+ * @in_quota_best: suggested number of input messages
+ * @out_quota_min: minimum number of output messages for protocol functionality
+ * @out_quota_best: suggested number of output messages
+ * @in_notify_count: number of input notification bits used
+ * @out_notify_count: number of output notification bits used
+ * --- Internal ---
+ * @driver: Linux device model driver structure
+ *
+ * The callback functions for a virtual service driver are all called from
+ * the virtual service device's work queue.
+ */
+struct vs_service_driver {
+	const char *protocol;
+	bool is_server;
+	bool rx_atomic, tx_atomic;
+
+	int (*probe)(struct vs_service_device *service);
+	int (*remove)(struct vs_service_device *service);
+
+	int (*receive)(struct vs_service_device *service,
+		struct vs_mbuf *mbuf);
+	void (*notify)(struct vs_service_device *service, u32 flags);
+
+	void (*start)(struct vs_service_device *service);
+	void (*reset)(struct vs_service_device *service);
+
+	int (*tx_ready)(struct vs_service_device *service);
+
+	unsigned in_quota_min;
+	unsigned in_quota_best;
+	unsigned out_quota_min;
+	unsigned out_quota_best;
+	unsigned in_notify_count;
+	unsigned out_notify_count;
+
+	struct device_driver driver;
+};
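+
+/*
+ * Illustrative sketch of a minimal client driver declaration. The protocol
+ * string and the my_probe/my_remove/my_receive callbacks are hypothetical.
+ *
+ *	static struct vs_service_driver my_client_driver = {
+ *		.protocol	= "com.example.myproto",
+ *		.is_server	= false,
+ *		.rx_atomic	= false,
+ *		.probe		= my_probe,
+ *		.remove		= my_remove,
+ *		.receive	= my_receive,
+ *		.driver		= {
+ *			.name	= "my-vs-client",
+ *			.owner	= THIS_MODULE,
+ *		},
+ *	};
+ */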
+
+#define to_vs_service_driver(d) \
+	container_of(d, struct vs_service_driver, driver)
+
+/* The vServices server/client bus types */
+extern struct bus_type vs_client_bus_type;
+extern struct bus_type vs_server_bus_type;
+
+/**
+ * struct vs_service_stats - Virtual service statistics
+ * @over_quota_time: Internal counter for tracking over quota time.
+ * @sent_mbufs: Total number of message buffers sent.
+ * @sent_bytes: Total bytes sent.
+ * @send_failures: Total number of send failures.
+ * @recv_mbufs: Total number of message buffers received.
+ * @recv_bytes: Total number of bytes received.
+ * @recv_failures: Total number of receive failures.
+ * @nr_over_quota: Number of times an mbuf allocation has failed because the
+ *                 service is over quota.
+ * @nr_tx_ready: Number of times the service has run its tx_ready handler
+ * @over_quota_time_total: The total amount of time in milliseconds that the
+ *                         service has spent over quota. Measured as the time
+ *                         between exceeding quota in mbuf allocation and
+ *                         running the tx_ready handler.
+ * @over_quota_time_avg: The average amount of time in milliseconds that the
+ *                       service is spending in the over quota state.
+ */
+struct vs_service_stats {
+	unsigned long	over_quota_time;
+
+	atomic_t        sent_mbufs;
+	atomic_t        sent_bytes;
+	atomic_t	send_failures;
+	atomic_t        recv_mbufs;
+	atomic_t        recv_bytes;
+	atomic_t	recv_failures;
+	atomic_t        nr_over_quota;
+	atomic_t        nr_tx_ready;
+	atomic_t        over_quota_time_total;
+	atomic_t        over_quota_time_avg;
+};
+
+/**
+ * struct vs_service_device - Virtual service device
+ * @id: Unique ID (to the session) for this service
+ * @name: Service name
+ * @sysfs_name: The sysfs name for the service
+ * @protocol: Service protocol name
+ * @is_server: True if this device is server, false if it is a client
+ * @owner: service responsible for managing this service. This must be
+ *     on the same session, and is NULL iff this is the core service.
+ *     It must not be a service whose driver has tx_atomic set.
+ * @lock_subclass: the number of generations of owners between this service
+ *     and the core service; 0 for the core service, 1 for anything directly
+ *     created by it, and so on. This is only used for verifying lock
+ *     ordering (when lockdep is enabled), hence the name.
+ * @ready_lock: mutex protecting readiness, disable_count and driver_probed.
+ *     This depends on the state_mutex of the service's owner, if any. Acquire
+ *     it using mutex_lock_nested(ready_lock, lock_subclass).
+ * @readiness: Service's readiness state, owned by session layer.
+ * @disable_count: Number of times the service has been disabled without
+ *     a matching enable.
+ * @driver_probed: True if a driver has been probed (and not removed)
+ * @work_queue: Work queue for this service's task-context work.
+ * @rx_tasklet: Tasklet for handling incoming messages. This is only used
+ *     if the service driver has rx_atomic set to true. Otherwise
+ *     incoming messages are handled on the workqueue by rx_work.
+ * @rx_work: Work structure for handling incoming messages. This is only
+ *     used if the service driver has rx_atomic set to false.
+ * @rx_lock: Spinlock which protects access to rx_queue and tx_ready
+ * @rx_queue: Queue of incoming messages
+ * @tx_ready: Flag indicating that a tx_ready event is pending
+ * @tx_batching: Flag indicating that outgoing messages are being batched
+ * @state_spinlock: spinlock used to protect the service state if the
+ *     service driver has tx_atomic (and rx_atomic) set to true. This
+ *     depends on the service's ready_lock. Acquire it only by
+ *     calling vs_service_state_lock_bh().
+ * @state_mutex: mutex used to protect the service state if the service
+ *     driver has tx_atomic set to false. This depends on the service's
+ *     ready_lock, and if rx_atomic is true, the rx_tasklet must be
+ *     disabled while it is held. Acquire it only by calling
+ *     vs_service_state_lock().
+ * @state_spinlock_used: Flag to check if the state spinlock has been acquired.
+ * @state_mutex_used: Flag to check if the state mutex has been acquired.
+ * @reset_work: Work to reset the service after a driver fails
+ * @pending_reset: Set if reset_work has been queued and not completed.
+ * @ready_work: Work to make service ready after a throttling delay
+ * @cooloff_work: Work for cooling off reset throttling after the reset
+ * throttling limit was hit
+ * @cleanup_work: Work for cleaning up and freeing the service structure
+ * @last_reset: Time in jiffies at which this service last reset
+ * @last_reset_request: Time in jiffies the last reset request for this
+ *     service occurred at
+ * @last_ready: Time in jiffies at which this service last became ready
+ * @reset_delay: Time in jiffies that the next throttled reset will be
+ *     delayed for. A value of zero means that reset throttling is not in
+ *     effect.
+ * @is_over_quota: Internal flag for whether the service is over quota. This
+ *                 flag is only used for stats accounting.
+ * @quota_wq: waitqueue that is woken whenever the available send quota
+ *            increases.
+ * @notify_send_bits: The number of bits allocated for outgoing notifications.
+ * @notify_send_offset: The first bit allocated for outgoing notifications.
+ * @notify_recv_bits: The number of bits allocated for incoming notifications.
+ * @notify_recv_offset: The first bit allocated for incoming notifications.
+ * @send_quota: The maximum number of outgoing messages.
+ * @recv_quota: The maximum number of incoming messages.
+ * @in_quota_set: For servers, the number of client->server messages
+ *     requested during system configuration (sysfs or environment).
+ * @out_quota_set: For servers, the number of server->client messages
+ *     requested during system configuration (sysfs or environment).
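+ * @transport_priv: Private data pointer for use by the transport driver.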
+ * @dev: Linux device model device structure
+ * @stats: Service statistics
+ */
+struct vs_service_device {
+	vs_service_id_t id;
+	char *name;
+	char *sysfs_name;
+	char *protocol;
+	bool is_server;
+
+	struct vs_service_device *owner;
+	unsigned lock_subclass;
+
+	struct mutex ready_lock;
+	unsigned readiness;
+	int disable_count;
+	bool driver_probed;
+
+	struct workqueue_struct *work_queue;
+
+	struct tasklet_struct rx_tasklet;
+	struct work_struct rx_work;
+
+	spinlock_t rx_lock;
+	struct list_head rx_queue;
+	bool tx_ready, tx_batching;
+
+	spinlock_t state_spinlock;
+	struct mutex state_mutex;
+
+	struct work_struct reset_work;
+	bool pending_reset;
+	struct delayed_work ready_work;
+	struct delayed_work cooloff_work;
+	struct work_struct cleanup_work;
+
+	unsigned long last_reset;
+	unsigned long last_reset_request;
+	unsigned long last_ready;
+	unsigned long reset_delay;
+
+	atomic_t is_over_quota;
+	wait_queue_head_t quota_wq;
+
+	unsigned notify_send_bits;
+	unsigned notify_send_offset;
+	unsigned notify_recv_bits;
+	unsigned notify_recv_offset;
+	unsigned send_quota;
+	unsigned recv_quota;
+
+	unsigned in_quota_set;
+	unsigned out_quota_set;
+
+	void *transport_priv;
+
+	struct device dev;
+	struct vs_service_stats stats;
+
+#ifdef CONFIG_VSERVICES_LOCK_DEBUG
+	bool state_spinlock_used;
+	bool state_mutex_used;
+#endif
+};
+
+#define to_vs_service_device(d) container_of(d, struct vs_service_device, dev)
+
+/**
+ * vs_service_get_session - Return the session for a service
+ * @service: Service to get the session for
+ */
+static inline struct vs_session_device *
+vs_service_get_session(struct vs_service_device *service)
+{
+	return to_vs_session_device(service->dev.parent);
+}
+
+/**
+ * vs_service_send - Send a message from a service
+ * @service: Service to send the message from
+ * @mbuf: Message buffer to send
+ */
+static inline int
+vs_service_send(struct vs_service_device *service, struct vs_mbuf *mbuf)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+	const struct vs_transport_vtable *vt = session->transport->vt;
+	const unsigned long flags =
+		service->tx_batching ?  VS_TRANSPORT_SEND_FLAGS_MORE : 0;
+	size_t msg_size = vt->mbuf_size(mbuf);
+	int err;
+
+	err = vt->send(session->transport, service, mbuf, flags);
+	if (!err) {
+		atomic_inc(&service->stats.sent_mbufs);
+		atomic_add(msg_size, &service->stats.sent_bytes);
+	} else {
+		atomic_inc(&service->stats.send_failures);
+	}
+
+	return err;
+}
+
+/**
+ * vs_service_alloc_mbuf - Allocate a message buffer for a service
+ * @service: Service to allocate the buffer for
+ * @size: Size of the data buffer to allocate
+ * @flags: Flags to pass to the buffer allocation
+ */
+static inline struct vs_mbuf *
+vs_service_alloc_mbuf(struct vs_service_device *service, size_t size,
+		gfp_t flags)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+	struct vs_mbuf *mbuf;
+
+	mbuf = session->transport->vt->alloc_mbuf(session->transport,
+			service, size, flags);
+	if (IS_ERR(mbuf) && PTR_ERR(mbuf) == -ENOBUFS) {
+		/* Over quota accounting */
+		if (atomic_cmpxchg(&service->is_over_quota, 0, 1) == 0) {
+			service->stats.over_quota_time = jiffies;
+			atomic_inc(&service->stats.nr_over_quota);
+		}
+	}
+
+	/*
+	 * The transport drivers should return either a valid message buffer
+	 * pointer or an ERR_PTR value. Warn here if a transport driver is
+	 * returning NULL on message buffer allocation failure.
+	 */
+	if (WARN_ON_ONCE(!mbuf))
+		return ERR_PTR(-ENOMEM);
+
+	return mbuf;
+}
+
+/**
+ * vs_service_free_mbuf - Deallocate a message buffer for a service
+ * @service: Service the message buffer was allocated for
+ * @mbuf: Message buffer to deallocate
+ */
+static inline void
+vs_service_free_mbuf(struct vs_service_device *service, struct vs_mbuf *mbuf)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+
+	session->transport->vt->free_mbuf(session->transport, service, mbuf);
+}
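+
+/*
+ * Illustrative sketch (not a framework API): a typical allocate/copy/send
+ * sequence using the helpers above. The payload arguments and the policy of
+ * freeing the mbuf on send failure are assumptions for illustration only.
+ *
+ *	struct vs_mbuf *mbuf;
+ *	int err;
+ *
+ *	mbuf = vs_service_alloc_mbuf(service, len, GFP_KERNEL);
+ *	if (IS_ERR(mbuf))
+ *		return PTR_ERR(mbuf);
+ *	memcpy(mbuf->data, buf, len);
+ *	err = vs_service_send(service, mbuf);
+ *	if (err < 0)
+ *		vs_service_free_mbuf(service, mbuf);
+ *	return err;
+ */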
+
+/**
+ * vs_service_notify - Send a notification from a service
+ * @service: Service to send the notification from
+ * @flags: Notification bits to send
+ */
+static inline int
+vs_service_notify(struct vs_service_device *service, u32 flags)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+
+	return session->transport->vt->notify(session->transport,
+			service, flags);
+}
+
+/**
+ * vs_service_has_atomic_rx - Return whether or not a service's receive
+ * message handler runs in atomic context. This function should only be
+ * called for services which are bound to a driver.
+ *
+ * @service: Service to check
+ */
+static inline bool
+vs_service_has_atomic_rx(struct vs_service_device *service)
+{
+	if (WARN_ON(!service->dev.driver))
+		return false;
+
+	return to_vs_service_driver(service->dev.driver)->rx_atomic;
+}
+
+/**
+ * vs_service_max_mbuf_size - Return the maximum allocation size of a message
+ * buffer.
+ * @service: The service to check
+ */
+static inline size_t
+vs_service_max_mbuf_size(struct vs_service_device *service)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+
+	return session->transport->vt->max_mbuf_size(session->transport);
+}
+
+/**
+ * vs_service_send_mbufs_available - Return the number of mbufs which can be
+ * allocated for sending before going over quota.
+ * @service: The service to check
+ */
+static inline ssize_t
+vs_service_send_mbufs_available(struct vs_service_device *service)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+
+	return session->transport->vt->service_send_avail(session->transport,
+			service);
+}
+
+/**
+ * vs_service_has_atomic_tx - Return whether or not a service is allowed to
+ * transmit from atomic context (other than its receive message handler).
+ * This function should only be called for services which are bound to a
+ * driver.
+ *
+ * @service: Service to check
+ */
+static inline bool
+vs_service_has_atomic_tx(struct vs_service_device *service)
+{
+	if (WARN_ON(!service->dev.driver))
+		return false;
+
+	return to_vs_service_driver(service->dev.driver)->tx_atomic;
+}
+
+/**
+ * vs_service_state_lock - Acquire a lock allowing service state operations
+ * from external task contexts.
+ *
+ * @service: Service to lock.
+ *
+ * This must be used to protect any service state accesses that occur in task
+ * contexts outside of a callback from the vservices protocol layer. It must
+ * not be called from a protocol layer callback, nor from atomic context.
+ *
+ * If this service's state is also accessed from softirq contexts other than
+ * vservices protocol layer callbacks, use vs_service_state_lock_bh instead,
+ * and set the driver's tx_atomic flag.
+ *
+ * If this is called from outside the service's workqueue, the calling driver
+ * must provide its own guarantee that it has not been detached from the
+ * service. If that is not possible, use vs_state_lock_safe().
+ */
+static inline void
+vs_service_state_lock(struct vs_service_device *service)
+__acquires(service)
+{
+#ifdef CONFIG_VSERVICES_LOCK_DEBUG
+	WARN_ON_ONCE(vs_service_has_atomic_tx(service));
+#endif
+
+	mutex_lock_nested(&service->state_mutex, service->lock_subclass);
+
+#ifdef CONFIG_VSERVICES_LOCK_DEBUG
+	if (WARN_ON_ONCE(service->state_spinlock_used))
+		dev_err(&service->dev, "Service is using both the state spinlock and mutex - Fix your driver\n");
+	service->state_mutex_used = true;
+#endif
+
+	if (vs_service_has_atomic_rx(service))
+		tasklet_disable(&service->rx_tasklet);
+
+	__acquire(service);
+}
+
+/**
+ * vs_service_state_unlock - Release the lock acquired by vs_service_state_lock.
+ *
+ * @service: Service to unlock.
+ */
+static inline void
+vs_service_state_unlock(struct vs_service_device *service)
+__releases(service)
+{
+	__release(service);
+
+	mutex_unlock(&service->state_mutex);
+
+	if (vs_service_has_atomic_rx(service)) {
+		tasklet_enable(&service->rx_tasklet);
+
+		/* Kick the tasklet if there is RX work to do */
+		if (!list_empty(&service->rx_queue))
+			tasklet_schedule(&service->rx_tasklet);
+	}
+}
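+
+/*
+ * Illustrative sketch: protecting protocol state accesses from an external
+ * task context. The do_send() helper is hypothetical.
+ *
+ *	vs_service_state_lock(service);
+ *	err = do_send(service);
+ *	vs_service_state_unlock(service);
+ */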
+
+/**
+ * vs_service_state_lock_bh - Acquire a lock allowing service state operations
+ * from external task or softirq contexts.
+ *
+ * @service: Service to lock.
+ *
+ * This is an alternative to vs_service_state_lock for drivers that receive
+ * messages in atomic context (i.e. have their rx_atomic flag set), *and* must
+ * transmit messages from softirq contexts other than their own message
+ * receive and tx_ready callbacks. Such drivers must set their tx_atomic
+ * flag, so generated protocol drivers perform correct locking.
+ *
+ * This should replace all calls to vs_service_state_lock for services that
+ * need it. Do not use both locking functions in one service driver.
+ *
+ * The calling driver must provide its own guarantee that it has not been
+ * detached from the service. If that is not possible, use
+ * vs_state_lock_safe_bh().
+ */
+static inline void
+vs_service_state_lock_bh(struct vs_service_device *service)
+__acquires(service)
+__acquires(&service->state_spinlock)
+{
+#ifdef CONFIG_VSERVICES_LOCK_DEBUG
+	WARN_ON_ONCE(!vs_service_has_atomic_rx(service));
+	WARN_ON_ONCE(!vs_service_has_atomic_tx(service));
+#endif
+
+#ifdef CONFIG_SMP
+	/* Not necessary on UP because it's implied by spin_lock_bh(). */
+	tasklet_disable(&service->rx_tasklet);
+#endif
+
+	spin_lock_bh(&service->state_spinlock);
+
+#ifdef CONFIG_VSERVICES_LOCK_DEBUG
+	if (WARN_ON_ONCE(service->state_mutex_used))
+		dev_err(&service->dev, "Service is using both the state spinlock and mutex - Fix your driver\n");
+	service->state_spinlock_used = true;
+#endif
+
+	__acquire(service);
+}
+
+/**
+ * vs_service_state_unlock_bh - Release the lock acquired by
+ * vs_service_state_lock_bh.
+ *
+ * @service: Service to unlock.
+ */
+static inline void
+vs_service_state_unlock_bh(struct vs_service_device *service)
+__releases(service)
+__releases(&service->state_spinlock)
+{
+	__release(service);
+
+	spin_unlock_bh(&service->state_spinlock);
+
+#ifdef CONFIG_SMP
+	tasklet_enable(&service->rx_tasklet);
+#endif
+}
+
+/* Convenience macros for locking a state structure rather than a service. */
+#define vs_state_lock(state) vs_service_state_lock((state)->service)
+#define vs_state_unlock(state) vs_service_state_unlock((state)->service)
+#define vs_state_lock_bh(state) vs_service_state_lock_bh((state)->service)
+#define vs_state_unlock_bh(state) vs_service_state_unlock_bh((state)->service)
+
+/**
+ * vs_state_lock_safe[_bh] - Acquire a lock for a state structure's service,
+ * when the service may have been detached from the state.
+ *
+ * This is useful for blocking operations that can't easily be terminated
+ * before returning from the service reset handler, such as file I/O. To use
+ * this, the state structure should be reference-counted rather than freed in
+ * the release callback, and the driver should retain its own reference to the
+ * service until the state structure is freed.
+ *
+ * This macro acquires the lock and returns true if the state has not been
+ * detached from the service. Otherwise, it returns false.
+ *
+ * Note that the _bh variant cannot be used from atomic context, because it
+ * acquires a mutex.
+ */
+#define __vs_state_lock_safe(_state, _lock, _unlock) ({ \
+	bool __ok = true;						\
+	typeof(_state) __state = (_state);				\
+	struct vs_service_device *__service = __state->service;		\
+	mutex_lock_nested(&__service->ready_lock,			\
+			__service->lock_subclass);			\
+	__ok = !READ_ONCE(__state->released);				\
+	if (__ok) {							\
+		_lock(__state);						\
+		__ok = !READ_ONCE(__state->released);			\
+		if (!__ok)						\
+			_unlock(__state);				\
+	}								\
+	mutex_unlock(&__service->ready_lock);				\
+	__ok;								\
+})
+#define vs_state_lock_safe(_state) \
+	__vs_state_lock_safe((_state), vs_state_lock, vs_state_unlock)
+#define vs_state_lock_safe_bh(_state) \
+	__vs_state_lock_safe((_state), vs_state_lock_bh, vs_state_unlock_bh)
+
+/**
+ * vs_get_service - Get a reference to a service.
+ * @service: Service to get a reference to.
+ */
+static inline struct vs_service_device *
+vs_get_service(struct vs_service_device *service)
+{
+	if (service)
+		get_device(&service->dev);
+	return service;
+}
+
+/**
+ * vs_put_service - Put a reference to a service.
+ * @service: The service to put the reference to.
+ */
+static inline void
+vs_put_service(struct vs_service_device *service)
+{
+	put_device(&service->dev);
+}
+
+extern int vs_service_reset(struct vs_service_device *service,
+		struct vs_service_device *caller);
+extern void vs_service_reset_nosync(struct vs_service_device *service);
+
+/**
+ * vs_service_send_batch_start - Start a batch of outgoing messages
+ * @service: The service that is starting a batch
+ * @flush: Finish any previously started batch (if false, then duplicate
+ * calls to this function have no effect)
+ */
+static inline void
+vs_service_send_batch_start(struct vs_service_device *service, bool flush)
+{
+	if (flush && service->tx_batching) {
+		struct vs_session_device *session =
+			vs_service_get_session(service);
+		const struct vs_transport_vtable *vt = session->transport->vt;
+		if (vt->flush)
+			vt->flush(session->transport, service);
+	} else {
+		service->tx_batching = true;
+	}
+}
+
+/**
+ * vs_service_send_batch_end - End a batch of outgoing messages
+ * @service: The service that is ending a batch
+ * @flush: Start sending the batch immediately (if false, the batch will
+ * be flushed when the next message is sent)
+ */
+static inline void
+vs_service_send_batch_end(struct vs_service_device *service, bool flush)
+{
+	service->tx_batching = false;
+	if (flush) {
+		struct vs_session_device *session =
+			vs_service_get_session(service);
+		const struct vs_transport_vtable *vt = session->transport->vt;
+		if (vt->flush)
+			vt->flush(session->transport, service);
+	}
+}
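+
+/*
+ * Illustrative batching sketch (the send helpers named here are
+ * hypothetical):
+ *
+ *	vs_service_send_batch_start(service, false);
+ *	send_header(service);
+ *	send_payload(service);
+ *	vs_service_send_batch_end(service, true);
+ */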
+
+
+#endif /* _VSERVICE_SERVICE_H_ */
diff --git a/include/vservices/session.h b/include/vservices/session.h
new file mode 100644
index 0000000..b9dc775
--- /dev/null
+++ b/include/vservices/session.h
@@ -0,0 +1,161 @@
+/*
+ * include/vservices/session.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file defines the device type for a vServices session attached to a
+ * transport. This should only be used by transport drivers, the vServices
+ * session code, and the inline transport-access functions defined in
+ * vservices/service.h.
+ *
+ * Drivers for these devices are defined internally by the vServices
+ * framework. Other drivers should not attach to these devices.
+ */
+
+#ifndef _VSERVICES_SESSION_H_
+#define _VSERVICES_SESSION_H_
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/completion.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/idr.h>
+
+#include <vservices/types.h>
+
+struct vs_service_device;
+struct vs_mbuf;
+
+struct notifier_block;
+
+/**
+ * enum vs_notify_event_t - vService notifier events
+ *
+ * @VS_SESSION_NOTIFY_ADD: vService session added. Argument is a pointer to
+ * the vs_session_device. This notification is sent after the session has been
+ * added.
+ *
+ * @VS_SESSION_NOTIFY_REMOVE: vService session about to be removed. Argument is
+ * a pointer to the vs_session_device. This notification is sent before the
+ * session is removed.
+ */
+enum vs_notify_event_t {
+	VS_SESSION_NOTIFY_ADD,
+	VS_SESSION_NOTIFY_REMOVE,
+};
+
+/**
+ * struct vs_session_device - Session device
+ * @name: The unique human-readable name of this session.
+ * @is_server: True if this session is a server, false if client
+ * @transport: The transport device for this session
+ * @session_num: Unique ID for this session. Used for sysfs
+ * @session_lock: Mutex which protects any change to service presence or
+ *     readiness
+ * @core_service: The core service, if one has ever been registered. Once set,
+ *     this must remain valid and unchanged until the session driver is
+ *     removed. Writes are protected by the service_idr_lock.
+ * @service_idr: IDR of the services currently allocated on this session,
+ *     which allows service IDs to be recycled. Protected by service_idr_lock.
+ * @service_idr_lock: Mutex protecting updates to service_idr and core_service
+ * @activation_work: work structure for handling session activation & reset
+ * @activation_state: true if transport is currently active
+ * @fatal_error_work: work structure for handling fatal session failures
+ * @debug_mask: Debug level mask
+ * @list: Entry in the global session list
+ * @sysfs_entry: Kobject pointer pointing to session device in sysfs under
+ *     sys/vservices
+ * @dev: Device structure for the Linux device model
+ */
+struct vs_session_device {
+	char *name;
+	bool is_server;
+	struct vs_transport *transport;
+	int session_num;
+
+	struct mutex session_lock;
+
+	/*
+	 * The service_idr maintains the list of currently allocated services
+	 * on a session, and allows for recycling of service ids. The lock also
+	 * protects core_service.
+	 */
+	struct idr service_idr;
+	struct mutex service_idr_lock;
+	struct vs_service_device *core_service;
+
+	struct work_struct activation_work;
+	atomic_t activation_state;
+
+	struct work_struct fatal_error_work;
+
+	unsigned long debug_mask;
+
+	struct list_head list;
+	struct kobject *sysfs_entry;
+
+	struct device dev;
+};
+
+#define to_vs_session_device(d) \
+	container_of(d, struct vs_session_device, dev)
+
+extern struct vs_session_device *
+vs_session_register(struct vs_transport *transport, struct device *parent,
+		bool server, const char *transport_name);
+extern void vs_session_start(struct vs_session_device *session);
+extern void vs_session_unregister(struct vs_session_device *session);
+
+extern int vs_session_handle_message(struct vs_session_device *session,
+		struct vs_mbuf *mbuf, vs_service_id_t service_id);
+
+extern void vs_session_quota_available(struct vs_session_device *session,
+		vs_service_id_t service_id, unsigned count,
+		bool send_tx_ready);
+
+extern void vs_session_handle_notify(struct vs_session_device *session,
+		unsigned long flags, vs_service_id_t service_id);
+
+extern void vs_session_handle_reset(struct vs_session_device *session);
+extern void vs_session_handle_activate(struct vs_session_device *session);
+
+extern struct vs_service_device *
+vs_server_create_service(struct vs_session_device *session,
+		struct vs_service_device *parent, const char *name,
+		const char *protocol, const void *plat_data);
+extern int vs_server_destroy_service(struct vs_service_device *service,
+		struct vs_service_device *parent);
+
+extern void vs_session_register_notify(struct notifier_block *nb);
+extern void vs_session_unregister_notify(struct notifier_block *nb);
+
+extern int vs_session_unbind_driver(struct vs_service_device *service);
+
+extern void vs_session_for_each_service(struct vs_session_device *session,
+		void (*func)(struct vs_service_device *, void *), void *data);
+
+extern struct mutex vs_session_lock;
+extern int vs_session_for_each_locked(
+		int (*fn)(struct vs_session_device *session, void *data),
+		void *data);
+
+static inline int vs_session_for_each(
+		int (*fn)(struct vs_session_device *session, void *data),
+		void *data)
+{
+	int r;
+	mutex_lock(&vs_session_lock);
+	r = vs_session_for_each_locked(fn, data);
+	mutex_unlock(&vs_session_lock);
+	return r;
+}
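+
+/*
+ * Illustrative sketch: counting registered sessions with
+ * vs_session_for_each(). The count_cb() callback name is hypothetical.
+ *
+ *	static int count_cb(struct vs_session_device *session, void *data)
+ *	{
+ *		(*(int *)data)++;
+ *		return 0;
+ *	}
+ *
+ *	int count = 0;
+ *	vs_session_for_each(count_cb, &count);
+ */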
+
+#endif /* _VSERVICES_SESSION_H_ */
diff --git a/include/vservices/transport.h b/include/vservices/transport.h
new file mode 100644
index 0000000..6251ce1
--- /dev/null
+++ b/include/vservices/transport.h
@@ -0,0 +1,150 @@
+/*
+ * include/vservices/transport.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file contains the transport vtable structure. This is made public so
+ * that application drivers can call the vtable functions directly (via
+ * the inlined wrappers in service.h) rather than through an additional
+ * indirect function call.
+ *
+ */
+
+#ifndef _VSERVICES_TRANSPORT_H_
+#define _VSERVICES_TRANSPORT_H_
+
+#include <linux/types.h>
+
+#include <vservices/types.h>
+
+struct vs_transport;
+struct vs_mbuf;
+struct vs_service_device;
+
+/**
+ * struct vs_transport_vtable - Transport driver operations. Transport drivers
+ * must provide implementations for all operations in this table.
+ * --- Message buffer allocation ---
+ * @alloc_mbuf: Allocate an mbuf of the given size for the given service
+ * @free_mbuf: Deallocate an mbuf
+ * @mbuf_size: Return the size in bytes of a message buffer. The size returned
+ *             should be the total number of bytes including any headers.
+ * @max_mbuf_size: Return the maximum allowable message buffer allocation size.
+ * --- Message sending ---
+ * @send: Queue an mbuf for sending
+ * @flush: Start the transfer for the current message batch, if any
+ * @notify: Send a notification
+ * --- Transport-level reset handling ---
+ * @reset: Reset the transport layer
+ * @ready: Ready the transport layer
+ * --- Service management ---
+ * @service_add: A new service has been added to this transport's session
+ * @service_remove: A service has been removed from this transport's session
+ * @service_start: A service on this transport's session has had its resource
+ *     allocations set and is about to start. This is always interleaved with
+ *     service_reset, with one specific exception: the core service client,
+ *     which has its quotas initially hard-coded to 0 send / 1 recv and
+ *     adjusted when the initial startup message arrives.
+ * @service_reset: A service on this transport's session has just been reset,
+ *     and any resources allocated to it should be cleaned up to prepare
+ *     for later reallocation.
+ * @service_send_avail: The number of message buffers that this service is
+ *                      able to send before going over quota.
+ * --- Query transport capabilities ---
+ * @get_notify_bits: Fetch the number of sent and received notification bits
+ *     supported by this transport. Note that this can be any positive value
+ *     up to UINT_MAX.
+ * @get_quota_limits: Fetch the total send and receive message buffer quotas
+ *     supported by this transport. Note that this can be any positive value
+ *     up to UINT_MAX.
+ */
+struct vs_transport_vtable {
+	/* Message buffer allocation */
+	struct vs_mbuf *(*alloc_mbuf)(struct vs_transport *transport,
+			struct vs_service_device *service, size_t size,
+			gfp_t gfp_flags);
+	void (*free_mbuf)(struct vs_transport *transport,
+			struct vs_service_device *service,
+			struct vs_mbuf *mbuf);
+	size_t (*mbuf_size)(struct vs_mbuf *mbuf);
+	size_t (*max_mbuf_size)(struct vs_transport *transport);
+
+	/* Sending messages */
+	int (*send)(struct vs_transport *transport,
+			struct vs_service_device *service,
+			struct vs_mbuf *mbuf, unsigned long flags);
+	int (*flush)(struct vs_transport *transport,
+			struct vs_service_device *service);
+	int (*notify)(struct vs_transport *transport,
+			struct vs_service_device *service,
+			unsigned long bits);
+
+	/* Raising and clearing transport-level reset */
+	void (*reset)(struct vs_transport *transport);
+	void (*ready)(struct vs_transport *transport);
+
+	/* Service management */
+	int (*service_add)(struct vs_transport *transport,
+			struct vs_service_device *service);
+	void (*service_remove)(struct vs_transport *transport,
+			struct vs_service_device *service);
+
+	int (*service_start)(struct vs_transport *transport,
+			struct vs_service_device *service);
+	int (*service_reset)(struct vs_transport *transport,
+			struct vs_service_device *service);
+
+	ssize_t (*service_send_avail)(struct vs_transport *transport,
+			struct vs_service_device *service);
+
+	/* Query transport capabilities */
+	void (*get_notify_bits)(struct vs_transport *transport,
+			unsigned *send_notify_bits, unsigned *recv_notify_bits);
+	void (*get_quota_limits)(struct vs_transport *transport,
+			unsigned *send_quota, unsigned *recv_quota);
+};
+
+/* Flags for .send */
+#define VS_TRANSPORT_SEND_FLAGS_MORE		0x1
+
+/**
+ * struct vs_transport - A structure representing a transport
+ * @type: Type of transport, e.g. microvisor or loopback
+ * @vt: Transport operations table
+ * @notify_info: Array of incoming notification settings
+ * @notify_info_size: Size of the incoming notification array
+ */
+struct vs_transport {
+	const char *type;
+	const struct vs_transport_vtable *vt;
+	struct vs_notify_info *notify_info;
+	int notify_info_size;
+};
+
+/**
+ * struct vs_mbuf - Message buffer. This is always allocated and released by the
+ * transport callbacks defined above, so it may be embedded in a
+ * transport-specific structure containing additional state.
+ * @data: Message data buffer
+ * @size: Size of the data buffer in bytes
+ * @is_recv: True if this mbuf was received from the other end of the
+ *           transport. False if it was allocated by this end for sending.
+ * @priv: Private value that will not be touched by the framework
+ * @queue: list_head for entry in lists. The session layer uses this queue
+ * for receiving messages. The transport driver may use this queue for its
+ * own purposes when sending messages.
+ */
+struct vs_mbuf {
+	void *data;
+	size_t size;
+	bool is_recv;
+	void *priv;
+	struct list_head queue;
+};
+
+#endif /* _VSERVICES_TRANSPORT_H_ */
diff --git a/include/vservices/types.h b/include/vservices/types.h
new file mode 100644
index 0000000..306156e
--- /dev/null
+++ b/include/vservices/types.h
@@ -0,0 +1,41 @@
+/*
+ * include/vservices/types.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _VSERVICE_TYPES_H
+#define _VSERVICE_TYPES_H
+
+#include <linux/types.h>
+
+typedef u16 vs_service_id_t;
+typedef u16 vs_message_id_t;
+
+/*
+ * An opaque handle to a queued asynchronous command. This is used internally
+ * by the generated interface code, to identify which of the pending commands
+ * is being replied to. It is provided as a parameter to non-blocking handler
+ * callbacks for queued asynchronous requests, and must be stored by the server
+ * and passed to the corresponding reply call.
+ */
+typedef struct vservice_queued_request vservice_queued_request_t;
+
+/*
+ * The following enum is used by a server to report whether its open callback
+ * succeeded or failed, by returning VS_SERVER_RESP_SUCCESS or
+ * VS_SERVER_RESP_FAILURE respectively. A server may instead choose to complete
+ * the request explicitly, in which case it should return
+ * VS_SERVER_RESP_EXPLICIT_COMPLETE.
+ */
+typedef enum vs_server_response_type {
+	VS_SERVER_RESP_SUCCESS,
+	VS_SERVER_RESP_FAILURE,
+	VS_SERVER_RESP_EXPLICIT_COMPLETE
+} vs_server_response_type_t;
+
+#endif /*_VSERVICE_TYPES_H */
diff --git a/include/vservices/wait.h b/include/vservices/wait.h
new file mode 100644
index 0000000..aaa5bc4
--- /dev/null
+++ b/include/vservices/wait.h
@@ -0,0 +1,454 @@
+/*
+ * include/vservices/wait.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Generic wait event helpers for Virtual Service drivers.
+ */
+
+#ifndef _VSERVICE_SERVICE_WAIT_H
+#define _VSERVICE_SERVICE_WAIT_H
+
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/sched/signal.h>
+
+/* Older kernels don't have lockdep_assert_held_once(). */
+#ifndef lockdep_assert_held_once
+#ifdef CONFIG_LOCKDEP
+#define lockdep_assert_held_once(l) do {				\
+		WARN_ON_ONCE(debug_locks && !lockdep_is_held(l));	\
+	} while (0)
+#else
+#define lockdep_assert_held_once(l) do { } while (0)
+#endif
+#endif
+
+/* Legacy wait macro; needs rewriting to use vs_state_lock_safe(). */
+/* FIXME: Redmine ticket #229 - philip. */
+/**
+ * __vs_service_wait_event - Wait for a condition to become true for a
+ * Virtual Service.
+ *
+ * @_service: The service to wait for the condition to be true for.
+ * @_wq: Waitqueue to wait on.
+ * @_condition: Condition to wait for.
+ *
+ * Returns: This function returns 0 once the condition is true, or -ERESTARTSYS
+ *          if the wait was interrupted by a signal. If _state is
+ *          TASK_UNINTERRUPTIBLE then this function will always return 0.
+ *
+ * This function must be called with the service's state lock held. The wait
+ * is performed without the state lock held, but the condition is re-checked
+ * after reacquiring the state lock. This property allows this function to
+ * check the state of the service's protocol in a thread safe manner.
+ *
+ * The caller is responsible for ensuring that it has not been detached from
+ * the given service.
+ *
+ * It is nearly always wrong to call this on the service workqueue, since
+ * the workqueue is single-threaded and the state can only change when a
+ * handler function is called on it.
+ */
+#define __vs_service_wait_event(_service, _wq, _cond, _state)		\
+	({								\
+		DEFINE_WAIT(__wait);					\
+		int __ret = 0;						\
+									\
+		lockdep_assert_held_once(&(_service)->state_mutex);	\
+		do {							\
+			prepare_to_wait(&(_wq), &__wait, (_state));	\
+									\
+			if (_cond)					\
+				break;					\
+									\
+			if ((_state) == TASK_INTERRUPTIBLE &&		\
+					signal_pending(current)) {	\
+				__ret = -ERESTARTSYS;			\
+				break;					\
+			}						\
+									\
+			vs_service_state_unlock(_service);		\
+			schedule();					\
+			vs_service_state_lock(_service);		\
+		} while (!(_cond));					\
+									\
+		finish_wait(&(_wq), &__wait);				\
+		__ret;							\
+	})
+
+/* Legacy wait macros; need rewriting to use __vs_wait_state(). */
+/* FIXME: Redmine ticket #229 - philip. */
+#define vs_service_wait_event(_service, _wq, _cond) \
+	__vs_service_wait_event(_service, _wq, _cond, TASK_INTERRUPTIBLE)
+#define vs_service_wait_event_nointr(_service, _wq, _cond) \
+	__vs_service_wait_event(_service, _wq, _cond, TASK_UNINTERRUPTIBLE)
+
+/**
+ * __vs_wait_state - block until a condition becomes true on a service state.
+ *
+ * @_state: The protocol state to wait on.
+ * @_cond: Condition to wait for.
+ * @_intr: If true, perform an interruptible wait; the wait may then fail
+ *         with -ERESTARTSYS.
+ * @_timeout: A timeout in jiffies, or negative for no timeout. If the
+ *         timeout expires, the wait will fail with -ETIMEDOUT.
+ * @_bh: The token _bh if this service uses tx_atomic (sends from a
+ *         non-framework tasklet); otherwise nothing.
+ *
+ * Return: Returns 0 once the condition becomes true, or a negative error
+ *         code as described above.
+ *
+ * This macro blocks waiting until a particular condition becomes true on a
+ * service state. The service must be running; if not, or if it ceases to be
+ * running during the wait, -ECANCELED will be returned.
+ *
+ * This is not an exclusive wait. If an exclusive wait is desired it is
+ * usually better to use the waiting alloc or send functions.
+ *
+ * This macro must be called with a reference to the service held, and with
+ * the service's state lock held. The state lock will be dropped by waiting
+ * but reacquired before returning, unless -ENOLINK is returned, in which case
+ * the service driver has been unbound and the lock cannot be reacquired.
+ */
+#define __vs_wait_state(_state, _cond, _intr, _timeout, _bh)	\
+	({								\
+		DEFINE_WAIT(__wait);					\
+		int __ret;						\
+		int __jiffies __maybe_unused = (_timeout);		\
+		struct vs_service_device *__service = (_state)->service;\
+									\
+		while (1) {						\
+			prepare_to_wait(&__service->quota_wq, &__wait,	\
+					_intr ? TASK_INTERRUPTIBLE :    \
+					TASK_UNINTERRUPTIBLE);		\
+									\
+			if (!VSERVICE_BASE_STATE_IS_RUNNING(		\
+					(_state)->state.base)) {	\
+				__ret = -ECANCELED;			\
+				break;					\
+			}						\
+									\
+			if (_cond) {					\
+				__ret = 0;				\
+				break;					\
+			}						\
+									\
+			if (_intr && signal_pending(current)) {		\
+				__ret = -ERESTARTSYS;			\
+				break;					\
+			}						\
+									\
+			vs_state_unlock##_bh(_state);			\
+									\
+			if (_timeout >= 0) {				\
+				__jiffies = schedule_timeout(__jiffies);\
+				if (!__jiffies) {			\
+					__ret = -ETIMEDOUT;		\
+					break;				\
+				}					\
+			} else {					\
+				schedule();				\
+			}						\
+									\
+			if (!vs_state_lock_safe##_bh(_state)) {		\
+				__ret = -ENOLINK;			\
+				break;					\
+			}						\
+		}							\
+									\
+		finish_wait(&__service->quota_wq, &__wait);		\
+		__ret;							\
+	})
+
+/* Specialisations of __vs_wait_state for common uses. */
+#define vs_wait_state(_state, _cond) \
+	__vs_wait_state(_state, _cond, true, -1,)
+#define vs_wait_state_timeout(_state, _cond, _timeout) \
+	__vs_wait_state(_state, _cond, true, _timeout,)
+#define vs_wait_state_nointr(_state, _cond) \
+	__vs_wait_state(_state, _cond, false, -1,)
+#define vs_wait_state_nointr_timeout(_state, _cond, _timeout) \
+	__vs_wait_state(_state, _cond, false, _timeout,)
+#define vs_wait_state_bh(_state, _cond) \
+	__vs_wait_state(_state, _cond, true, -1, _bh)
+#define vs_wait_state_timeout_bh(_state, _cond, _timeout) \
+	__vs_wait_state(_state, _cond, true, _timeout, _bh)
+#define vs_wait_state_nointr_bh(_state, _cond) \
+	__vs_wait_state(_state, _cond, false, -1, _bh)
+#define vs_wait_state_nointr_timeout_bh(_state, _cond, _timeout) \
+	__vs_wait_state(_state, _cond, false, _timeout, _bh)
+
+/**
+ * __vs_wait_alloc - block until quota is available, then allocate a buffer.
+ *
+ * @_state: The protocol state to allocate a message for.
+ * @_alloc_func: The message buffer allocation function to run. This is the
+ *         full function invocation, not a pointer to the function.
+ * @_cond: Additional condition which must remain true, or else the wait
+ *         will fail with -ECANCELED. This is typically used to check the
+ *         service's protocol state. Note that this condition will only
+ *         be checked after sleeping; it is assumed to be true when the
+ *         macro is first called.
+ * @_unlock: If true, drop the service state lock before sleeping. The wait
+ *         may then fail with -ENOLINK if the driver is detached from the
+ *         service, in which case the lock is dropped.
+ * @_intr: If true, perform an interruptible wait; the wait may then fail
+ *         with -ERESTARTSYS.
+ * @_timeout: A timeout in jiffies, or negative for no timeout. If the
+ *         timeout expires, the wait will fail with -ETIMEDOUT.
+ * @_bh: The token _bh if this service uses tx_atomic (sends from a
+ *         non-framework tasklet); otherwise nothing.
+ *
+ * Return: Return a pointer to a message buffer on successful allocation,
+ *         or an error code in ERR_PTR form.
+ *
+ * This macro calls a specified message allocation function, and blocks
+ * if it returns -ENOBUFS, waiting until quota is available on the service
+ * before retrying. It aborts the wait if the service resets, or if the
+ * optionally specified condition becomes false. Note that a reset followed
+ * quickly by an activate might not trigger a failure; if that is significant
+ * for your driver, use the optional condition to detect it.
+ *
+ * This macro must be called with a reference to the service held, and with
+ * the service's state lock held. The reference and state lock will still be
+ * held on return, unless -ENOLINK is returned, in which case the lock has been
+ * dropped and cannot be reacquired.
+ *
+ * This is always an exclusive wait. It is safe to call without separately
+ * waking the waitqueue afterwards; if the allocator function fails for any
+ * reason other than quota exhaustion then another waiter will be woken.
+ *
+ * Be wary of potential deadlocks when using this macro on the service
+ * workqueue. If both ends block their service workqueues waiting for quota,
+ * then no progress can be made. It is usually only correct to block the
+ * service workqueue on the server side.
+ */
+#define __vs_wait_alloc(_state, _alloc_func, _cond, _unlock, _intr, 	\
+		_timeout, _bh)						\
+	({								\
+		DEFINE_WAIT(__wait);					\
+		struct vs_mbuf *__mbuf = NULL;				\
+		int __jiffies __maybe_unused = (_timeout);		\
+		struct vs_service_device *__service = (_state)->service;\
+									\
+		while (!vs_service_send_mbufs_available(__service)) {	\
+			if (_intr && signal_pending(current)) {		\
+				__mbuf = ERR_PTR(-ERESTARTSYS);		\
+				break;					\
+			}						\
+									\
+			prepare_to_wait_exclusive(			\
+					&__service->quota_wq, &__wait,	\
+					_intr ? TASK_INTERRUPTIBLE :    \
+					TASK_UNINTERRUPTIBLE);		\
+									\
+			if (_unlock)					\
+				vs_state_unlock##_bh(_state);		\
+									\
+			if (_timeout >= 0) {				\
+				__jiffies = schedule_timeout(__jiffies);\
+				if (!__jiffies) {			\
+					__mbuf = ERR_PTR(-ETIMEDOUT);	\
+					break;				\
+				}					\
+			} else {					\
+				schedule();				\
+			}						\
+									\
+			if (_unlock && !vs_state_lock_safe##_bh(	\
+						_state)) {		\
+				__mbuf = ERR_PTR(-ENOLINK);		\
+				break;					\
+			}						\
+									\
+			if (!VSERVICE_BASE_STATE_IS_RUNNING(		\
+					(_state)->state.base) ||	\
+					!(_cond)) {			\
+				__mbuf = ERR_PTR(-ECANCELED);		\
+				break;					\
+			}						\
+		}							\
+		finish_wait(&__service->quota_wq, &__wait);		\
+									\
+		if (__mbuf == NULL)					\
+			__mbuf = (_alloc_func);				\
+		if (IS_ERR(__mbuf) && (PTR_ERR(__mbuf) != -ENOBUFS))	\
+			wake_up(&__service->quota_wq);			\
+		__mbuf;							\
+	})
+
+/* Specialisations of __vs_wait_alloc for common uses. */
+#define vs_wait_alloc(_state, _cond, _alloc_func) \
+	__vs_wait_alloc(_state, _alloc_func, _cond, true, true, -1,)
+#define vs_wait_alloc_timeout(_state, _cond, _alloc_func, _timeout) \
+	__vs_wait_alloc(_state, _alloc_func, _cond, true, true, _timeout,)
+#define vs_wait_alloc_nointr(_state, _cond, _alloc_func) \
+	__vs_wait_alloc(_state, _alloc_func, _cond, true, false, -1,)
+#define vs_wait_alloc_nointr_timeout(_state, _cond, _alloc_func, _timeout) \
+	__vs_wait_alloc(_state, _alloc_func, _cond, true, false, _timeout,)
+#define vs_wait_alloc_bh(_state, _cond, _alloc_func) \
+	__vs_wait_alloc(_state, _alloc_func, _cond, true, true, -1, _bh)
+#define vs_wait_alloc_timeout_bh(_state, _cond, _alloc_func, _timeout) \
+	__vs_wait_alloc(_state, _alloc_func, _cond, true, true, _timeout, _bh)
+#define vs_wait_alloc_nointr_bh(_state, _cond, _alloc_func) \
+	__vs_wait_alloc(_state, _alloc_func, _cond, true, false, -1, _bh)
+#define vs_wait_alloc_nointr_timeout_bh(_state, _cond, _alloc_func, _timeout) \
+	__vs_wait_alloc(_state, _alloc_func, _cond, true, false, _timeout, _bh)
+#define vs_wait_alloc_locked(_state, _alloc_func) \
+	__vs_wait_alloc(_state, _alloc_func, true, false, true, -1,)
+
+/* Legacy wait macros, to be removed and replaced with those above. */
+/* FIXME: Redmine ticket #229 - philip. */
+#define vs_service_waiting_alloc(_state, _alloc_func) \
+	__vs_wait_alloc(_state, _alloc_func, true, false, true, -1,)
+#define vs_service_waiting_alloc_cond_locked(_state, _alloc_func, _cond) \
+	__vs_wait_alloc(_state, _alloc_func, _cond, true, true, -1,)
+#define vs_service_waiting_alloc_cond_locked_nointr(_state, _alloc_func, _cond) \
+	__vs_wait_alloc(_state, _alloc_func, _cond, true, false, -1,)
+
+/**
+ * __vs_wait_send - block until quota is available, then send a message.
+ *
+ * @_state: The protocol state to send a message for.
+ * @_cond: Additional condition which must remain true, or else the wait
+ *         will fail with -ECANCELED. This is typically used to check the
+ *         service's protocol state. Note that this condition will only
+ *         be checked after sleeping; it is assumed to be true when the
+ *         macro is first called.
+ * @_send_func: The message send function to run. This is the full function
+ *         invocation, not a pointer to the function.
+ * @_unlock: If true, drop the service state lock before sleeping. The wait
+ *         may then fail with -ENOLINK if the driver is detached from the
+ *         service, in which case the lock is dropped.
+ * @_check_running: If true, the wait will return -ECANCELED if the service's
+ *         base state is not active, or ceases to be active.
+ * @_intr: If true, perform an interruptible wait; the wait may then fail
+ *         with -ERESTARTSYS.
+ * @_timeout: A timeout in jiffies, or negative for no timeout. If the
+ *         timeout expires, the wait will fail with -ETIMEDOUT.
+ * @_bh: The token _bh if this service uses tx_atomic (sends from a
+ *         non-framework tasklet); otherwise nothing.
+ *
+ * Return: If the send succeeds, then 0 is returned; otherwise an error
+ *         code may be returned as described above.
+ *
+ * This macro calls a specified message send function, and blocks if it
+ * returns -ENOBUFS, waiting until quota is available on the service before
+ * retrying. It aborts the wait if it finds the service in reset, or if the
+ * optionally specified condition becomes false. Note that a reset followed
+ * quickly by an activate might not trigger a failure; if that is significant
+ * for your driver, use the optional condition to detect it.
+ *
+ * This macro must be called with a reference to the service held, and with
+ * the service's state lock held. The reference and state lock will still be
+ * held on return, unless -ENOLINK is returned, in which case the lock has been
+ * dropped and cannot be reacquired.
+ *
+ * This is always an exclusive wait. It is safe to call without separately
+ * waking the waitqueue afterwards; if the send function fails for any
+ * reason other than quota exhaustion then another waiter will be woken.
+ *
+ * Be wary of potential deadlocks when calling this function on the service
+ * workqueue. If both ends block their service workqueues waiting for quota,
+ * then no progress can be made. It is usually only correct to block the
+ * service workqueue on the server side.
+ */
+#define __vs_wait_send(_state, _cond, _send_func, _unlock, 		\
+		_check_running, _intr, _timeout, _bh)			\
+	({								\
+		DEFINE_WAIT(__wait);					\
+		int __ret = 0;						\
+		int __jiffies __maybe_unused = (_timeout);		\
+		struct vs_service_device *__service = (_state)->service;\
+									\
+		while (!vs_service_send_mbufs_available(__service)) {	\
+			if (_intr && signal_pending(current)) {		\
+				__ret = -ERESTARTSYS;			\
+				break;					\
+			}						\
+									\
+			prepare_to_wait_exclusive(			\
+					&__service->quota_wq, &__wait,	\
+					_intr ? TASK_INTERRUPTIBLE :    \
+					TASK_UNINTERRUPTIBLE);		\
+									\
+			if (_unlock)					\
+				vs_state_unlock##_bh(_state);		\
+									\
+			if (_timeout >= 0) {				\
+				__jiffies = schedule_timeout(__jiffies);\
+				if (!__jiffies) {			\
+					__ret = -ETIMEDOUT;		\
+					break;				\
+				}					\
+			} else {					\
+				schedule();				\
+			}						\
+									\
+			if (_unlock && !vs_state_lock_safe##_bh(	\
+						_state)) {		\
+				__ret = -ENOLINK;			\
+				break;					\
+			}						\
+									\
+			if ((_check_running &&				\
+					!VSERVICE_BASE_STATE_IS_RUNNING(\
+					(_state)->state.base)) ||	\
+					!(_cond)) {			\
+				__ret = -ECANCELED;			\
+				break;					\
+			}						\
+		}							\
+		finish_wait(&__service->quota_wq, &__wait);		\
+									\
+		if (!__ret)						\
+			__ret = (_send_func);				\
+		if ((__ret < 0) && (__ret != -ENOBUFS))			\
+			wake_up(&__service->quota_wq);			\
+		__ret;							\
+	})
+
+/* Specialisations of __vs_wait_send for common uses. */
+#define vs_wait_send(_state, _cond, _send_func) \
+	__vs_wait_send(_state, _cond, _send_func, true, true, true, -1,)
+#define vs_wait_send_timeout(_state, _cond, _send_func, _timeout) \
+	__vs_wait_send(_state, _cond, _send_func, true, true, true, _timeout,)
+#define vs_wait_send_nointr(_state, _cond, _send_func) \
+	__vs_wait_send(_state, _cond, _send_func, true, true, false, -1,)
+#define vs_wait_send_nointr_timeout(_state, _cond, _send_func, _timeout) \
+	__vs_wait_send(_state, _cond, _send_func, true, true, false, _timeout,)
+#define vs_wait_send_bh(_state, _cond, _send_func) \
+	__vs_wait_send(_state, _cond, _send_func, true, true, true, -1, _bh)
+#define vs_wait_send_timeout_bh(_state, _cond, _send_func, _timeout) \
+	__vs_wait_send(_state, _cond, _send_func, true, true, true, \
+			_timeout, _bh)
+#define vs_wait_send_nointr_bh(_state, _cond, _send_func) \
+	__vs_wait_send(_state, _cond, _send_func, true, true, false, -1, _bh)
+#define vs_wait_send_nointr_timeout_bh(_state, _cond, _send_func, _timeout) \
+	__vs_wait_send(_state, _cond, _send_func, true, true, false, \
+			_timeout, _bh)
+#define vs_wait_send_locked(_state, _send_func) \
+	__vs_wait_send(_state, true, _send_func, false, true, true, -1,)
+#define vs_wait_send_locked_nocheck(_state, _send_func) \
+	__vs_wait_send(_state, true, _send_func, false, false, true, -1,)
+
+/* Legacy wait macros, to be removed and replaced with those above. */
+/* FIXME: Redmine ticket #229 - philip. */
+#define vs_service_waiting_send(_state, _send_func) \
+	__vs_wait_send(_state, true, _send_func, true, true, true, -1,)
+#define vs_service_waiting_send_nointr(_state, _send_func) \
+	__vs_wait_send(_state, true, _send_func, true, true, false, -1,)
+#define vs_service_waiting_send_cond(_state, _cond, _send_func) \
+	__vs_wait_send(_state, _cond, _send_func, true, true, true, -1,)
+#define vs_service_waiting_send_cond_nointr(_state, _cond, _send_func) \
+	__vs_wait_send(_state, _cond, _send_func, true, true, false, -1,)
+#define vs_service_waiting_send_nocheck(_state, _send_func) \
+	__vs_wait_send(_state, true, _send_func, true, false, true, -1,)
+
+#endif /* _VSERVICE_SERVICE_WAIT_H */
diff --git a/mm/memory.c b/mm/memory.c
index ae8991f..5c5df53 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3514,7 +3514,7 @@
 }
 
 static unsigned long fault_around_bytes __read_mostly =
-	rounddown_pow_of_two(4096);
+	rounddown_pow_of_two(65536);
 
 #ifdef CONFIG_DEBUG_FS
 static int fault_around_bytes_get(void *data, u64 *val)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 3e004d1..edbeb4d 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1119,7 +1119,7 @@
 		struct address_space *mapping;
 		struct page *page;
 		int may_enter_fs;
-		enum page_references references = PAGEREF_RECLAIM_CLEAN;
+		enum page_references references = PAGEREF_RECLAIM;
 		bool dirty, writeback;
 
 		cond_resched();
@@ -1514,6 +1514,8 @@
 		.gfp_mask = GFP_KERNEL,
 		.priority = DEF_PRIORITY,
 		.may_unmap = 1,
+		/* Do not allow dirty pages to be written out */
+		.may_writepage = 0,
 	};
 	unsigned long ret;
 	struct page *page, *next;