driver: vservices: Add the vservices framework and core

Adds the Virtual Services framework and core protocol code.

The Virtual Services framework provides a bus for generic inter-vm
communications using a high level abstract model. The vservices
bus provides support for both HLOS and embedded C clients and servers,
allowing VMs to communicate in a common OS independent manner.

The vservices bus and services over it are hot-plug capable and
can support a wide variety of use cases, including device virtualization
using virtual device protocol (classes) and drivers, similar in
concept to USB or virtual IO.

Change-Id: I7a696354f59730e0ad340fb92dc85661a7376dee
Signed-off-by: Carl van Schaik <carl@cog.systems>
Git-commit: 42814676e8bf5fb34060ee80e05e2175ae146292
Git-repo: https://github.com/CogSystems/linux-msm.git
Signed-off-by: Minming Qi <mqi@codeaurora.org>
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 9480d84..5960816 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -138,6 +138,8 @@
 
 source "drivers/xen/Kconfig"
 
+source "drivers/vservices/Kconfig"
+
 source "drivers/staging/Kconfig"
 
 source "drivers/platform/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 06e2bb4..557cba5 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -10,6 +10,8 @@
 
 obj-$(CONFIG_GENERIC_PHY)	+= phy/
 
+obj-$(CONFIG_VSERVICES_SUPPORT)	+= vservices/
+
 # GPIO must come after pinctrl as gpios may need to mux pins etc
 obj-$(CONFIG_PINCTRL)		+= pinctrl/
 obj-$(CONFIG_GPIOLIB)		+= gpio/
diff --git a/drivers/vservices/Kconfig b/drivers/vservices/Kconfig
new file mode 100644
index 0000000..16b3bda
--- /dev/null
+++ b/drivers/vservices/Kconfig
@@ -0,0 +1,81 @@
+#
+# OKL4 Virtual Services framework
+#
+
+menuconfig VSERVICES_SUPPORT
+	tristate "OKL4 Virtual Services support"
+	default OKL4_GUEST || OKL4_VIRTUALISATION
+	select HOTPLUG
+	help
+	  This option adds core support for OKL4 Virtual Services. The Virtual
+	  Services framework is an inter-OS device/service sharing
+	  protocol which is supported on OKL4 Microvisor virtualization
+	  platforms. You will also need drivers from the following menu in
+	  order to make use of it.
+
+if VSERVICES_SUPPORT
+
+config VSERVICES_CHAR_DEV
+	bool "Virtual Services user-space service API"
+	default y
+	help
+	  Select this if you want to use user-space service drivers. You will
+	  also need udev rules that create device nodes, and protocol code
+	  generated by the OK Mill tool.
+
+config VSERVICES_DEBUG
+	bool "Virtual Services debugging support"
+	help
+	  Select this if you want to enable Virtual Services core framework
+	  debugging. The debug messages for various components of the Virtual
+	  Services core framework can be toggled at runtime on a per-session
+	  basis via sysfs. When Virtual Services debugging is enabled here,
+	  but disabled at runtime it has a minimal performance impact.
+
+config VSERVICES_LOCK_DEBUG
+	bool "Debug Virtual Services state locks"
+	default DEBUG_KERNEL
+	help
+	  This option enables some runtime checks that Virtual Services
+	  state lock functions are used correctly in service drivers.
+
+config VSERVICES_SERVER
+	tristate "Virtual Services server support"
+	depends on SYSFS
+	default y
+	help
+	  This option adds support for Virtual Services servers, which allows
+	  exporting of services from this Linux to other environments. Servers
+	  are created at runtime by writing to files in
+	  /sys/bus/vservices-server.
+
+config VSERVICES_CLIENT
+	tristate "Virtual Services client support"
+	default y
+	help
+	  This option adds support for Virtual Services clients, which allows
+	  connecting to services exported from other environments.
+
+config VSERVICES_SKELETON_DRIVER
+	tristate "Virtual Services skeleton driver"
+	depends on VSERVICES_SERVER || VSERVICES_CLIENT
+	default n
+	help
+	  This option adds support for a skeleton virtual service driver. This
+	  driver can be used for templating or testing of virtual service
+	  drivers. If unsure say N.
+
+config VSERVICES_NAMED_DEVICE
+	bool "Virtual Services use named device node in /dev"
+	default n
+	help
+	  Select this if you want to use a named device name over a numeric
+	  device name in /dev
+
+source "drivers/vservices/transport/Kconfig"
+
+source "drivers/vservices/protocol/Kconfig"
+
+source "drivers/vservices/Kconfig.stacks"
+
+endif # VSERVICES_SUPPORT
diff --git a/drivers/vservices/Kconfig.stacks b/drivers/vservices/Kconfig.stacks
new file mode 100644
index 0000000..97eba53
--- /dev/null
+++ b/drivers/vservices/Kconfig.stacks
@@ -0,0 +1,7 @@
+#
+# vServices drivers configuration
+#
+
+menu "Client and Server drivers"
+
+endmenu
diff --git a/drivers/vservices/Makefile b/drivers/vservices/Makefile
new file mode 100644
index 0000000..4ce9e48
--- /dev/null
+++ b/drivers/vservices/Makefile
@@ -0,0 +1,14 @@
+ccflags-y += -Werror
+ccflags-$(CONFIG_VSERVICES_DEBUG) += -DDEBUG
+
+obj-$(CONFIG_VSERVICES_SUPPORT)	+= vservices.o
+vservices-objs-$(CONFIG_VSERVICES_CHAR_DEV) += devio.o
+vservices-objs = session.o $(vservices-objs-y)
+
+obj-$(CONFIG_VSERVICES_CLIENT) += core_client.o
+obj-$(CONFIG_VSERVICES_SERVER) += core_server.o
+
+obj-$(CONFIG_VSERVICES_SKELETON_DRIVER) += vservices_skeleton_driver.o
+vservices_skeleton_driver-objs = skeleton_driver.o
+
+obj-$(CONFIG_VSERVICES_SUPPORT) += protocol/
diff --git a/drivers/vservices/compat.h b/drivers/vservices/compat.h
new file mode 100644
index 0000000..5f6926d
--- /dev/null
+++ b/drivers/vservices/compat.h
@@ -0,0 +1,59 @@
+/*
+ * drivers/vservices/compat.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Wrapper functions/definitions for compatibility between different kernel
+ * versions.
+ */
+
+#ifndef _VSERVICES_COMPAT_H
+#define _VSERVICES_COMPAT_H
+
+#include <linux/workqueue.h>
+#include <linux/version.h>
+
+/* The INIT_WORK_ONSTACK macro has a slightly different name in older kernels */
+#ifndef INIT_WORK_ONSTACK
+#define INIT_WORK_ONSTACK(_work, _func) INIT_WORK_ON_STACK(_work, _func)
+#endif
+
+/*
+ * We require a workqueue with no concurrency. This is provided by
+ * create_singlethread_workqueue() in kernels prior to 2.6.36.
+ * In later versions, create_singlethread_workqueue() enables WQ_MEM_RECLAIM and
+ * thus WQ_RESCUER, which allows work items to be grabbed by a rescuer thread
+ * and run concurrently if the queue is running too slowly. We must use
+ * alloc_ordered_workqueue() instead, to disable the rescuer.
+ */
+static inline struct workqueue_struct *
+vs_create_workqueue(const char *name)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
+	return create_singlethread_workqueue(name);
+#else
+	return alloc_ordered_workqueue(name, 0);
+#endif
+}
+
+/*
+ * The max3 macro has only been present from 2.6.37
+ * (commit: f27c85c56b32c42bcc54a43189c1e00fdceb23ec)
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 37)
+#define max3(x, y, z) ({			\
+	typeof(x) _max1 = (x);			\
+	typeof(y) _max2 = (y);			\
+	typeof(z) _max3 = (z);			\
+	(void) (&_max1 == &_max2);		\
+	(void) (&_max1 == &_max3);		\
+	_max1 > _max2 ? (_max1 > _max3 ? _max1 : _max3) : \
+		(_max2 > _max3 ? _max2 : _max3); })
+#endif
+
+#endif /* _VSERVICES_COMPAT_H */
diff --git a/drivers/vservices/core_client.c b/drivers/vservices/core_client.c
new file mode 100644
index 0000000..4cc78ac
--- /dev/null
+++ b/drivers/vservices/core_client.c
@@ -0,0 +1,733 @@
+/*
+ * drivers/vservices/core_client.c
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Client side core service application driver. This is responsible for:
+ *
+ *  - automatically connecting to the server when it becomes ready;
+ *  - sending a reset command to the server if something has gone wrong; and
+ *  - enumerating all the available services.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/err.h>
+#include <linux/module.h>
+
+#include <vservices/types.h>
+#include <vservices/transport.h>
+#include <vservices/session.h>
+#include <vservices/buffer.h>
+#include <vservices/service.h>
+
+#include <vservices/protocol/core/types.h>
+#include <vservices/protocol/core/common.h>
+#include <vservices/protocol/core/client.h>
+
+#include "session.h"
+#include "transport.h"
+#include "compat.h"
+
+struct core_client {
+	struct vs_client_core_state	state;
+	struct vs_service_device	*service;
+
+	struct list_head		message_queue;
+	struct mutex			message_queue_lock;
+	struct work_struct		message_queue_work;
+};
+
+struct pending_reset {
+	struct vs_service_device	*service;
+	struct list_head		list;
+};
+
+#define to_core_client(x)	container_of(x, struct core_client, state)
+#define dev_to_core_client(x)	to_core_client(dev_get_drvdata(x))
+
+static int vs_client_core_fatal_error(struct vs_client_core_state *state)
+{
+	struct core_client *client = to_core_client(state);
+
+	/* Force a transport level reset */
+	dev_err(&client->service->dev, "Fatal error - resetting session\n");
+	return -EPROTO;
+}
+
+static struct core_client *
+vs_client_session_core_client(struct vs_session_device *session)
+{
+	struct vs_service_device *core_service = session->core_service;
+
+	if (!core_service)
+		return NULL;
+
+	return dev_to_core_client(&core_service->dev);
+}
+
+static ssize_t client_core_reset_service_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct vs_service_device *core_service = to_vs_service_device(dev);
+	struct vs_session_device *session =
+		vs_service_get_session(core_service);
+	struct vs_service_device *target;
+	vs_service_id_t service_id;
+	unsigned long val;
+	int err;
+
+	/* Writing a valid service id to this file resets that service */
+	err = kstrtoul(buf, 0, &val);
+	if (err)
+		return err;
+
+	service_id = val;
+	target = vs_session_get_service(session, service_id);
+	if (!target)
+		return -ENODEV;
+
+	err = vs_service_reset(target, core_service);
+
+	vs_put_service(target);
+	return err < 0 ? err : count;
+}
+
+static DEVICE_ATTR(reset_service, S_IWUSR, NULL,
+		client_core_reset_service_store);
+
+static struct attribute *client_core_dev_attrs[] = {
+	&dev_attr_reset_service.attr,
+	NULL,
+};
+
+static const struct attribute_group client_core_attr_group = {
+	.attrs = client_core_dev_attrs,
+};
+
+/*
+ * Protocol callbacks
+ */
+static int
+vs_client_core_handle_service_removed(struct vs_client_core_state *state,
+		u32 service_id)
+{
+	struct core_client *client = to_core_client(state);
+	struct vs_session_device *session =
+			vs_service_get_session(client->service);
+	struct vs_service_device *service;
+	int ret;
+
+	service = vs_session_get_service(session, service_id);
+	if (!service)
+		return -EINVAL;
+
+	ret = vs_service_handle_delete(service);
+	vs_put_service(service);
+	return ret;
+}
+
+static int vs_client_core_create_service(struct core_client *client,
+		struct vs_session_device *session, vs_service_id_t service_id,
+		struct vs_string *protocol_name_string,
+		struct vs_string *service_name_string)
+{
+	char *protocol_name, *service_name;
+	struct vs_service_device *service;
+	int ret = 0;
+
+	protocol_name = vs_string_dup(protocol_name_string, GFP_KERNEL);
+	if (!protocol_name) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	service_name = vs_string_dup(service_name_string, GFP_KERNEL);
+	if (!service_name) {
+		ret = -ENOMEM;
+		goto out_free_protocol_name;
+	}
+
+	service = vs_service_register(session, client->service, service_id,
+			protocol_name, service_name, NULL);
+	if (IS_ERR(service)) {
+		ret = PTR_ERR(service);
+		goto out_free_service_name;
+	}
+
+	vs_service_start(service);
+
+out_free_service_name:
+	kfree(service_name);
+out_free_protocol_name:
+	kfree(protocol_name);
+out:
+	return ret;
+}
+
+static int
+vs_client_core_handle_service_created(struct vs_client_core_state *state,
+		u32 service_id, struct vs_string service_name,
+		struct vs_string protocol_name, struct vs_mbuf *mbuf)
+{
+	struct core_client *client = to_core_client(state);
+	struct vs_session_device *session =
+			vs_service_get_session(client->service);
+	int err;
+
+	vs_dev_debug(VS_DEBUG_CLIENT_CORE,
+			vs_service_get_session(client->service),
+			&client->service->dev, "Service info for %d received\n",
+			service_id);
+
+	err = vs_client_core_create_service(client, session, service_id,
+			&protocol_name, &service_name);
+	if (err)
+		dev_err(&session->dev,
+				"Failed to create service with id %d: %d\n",
+				service_id, err);
+
+	vs_client_core_core_free_service_created(state, &service_name,
+			&protocol_name, mbuf);
+
+	return err;
+}
+
+static int
+vs_client_core_send_service_reset(struct core_client *client,
+		struct vs_service_device *service)
+{
+	return vs_client_core_core_send_service_reset(&client->state,
+			service->id, GFP_KERNEL);
+}
+
+static int
+vs_client_core_queue_service_reset(struct vs_session_device *session,
+		struct vs_service_device *service)
+{
+	struct core_client *client =
+		vs_client_session_core_client(session);
+	struct pending_reset *msg;
+
+	if (!client)
+		return -ENODEV;
+
+	vs_dev_debug(VS_DEBUG_SERVER, session, &session->dev,
+			"Sending reset for service %d\n", service->id);
+
+	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
+	mutex_lock(&client->message_queue_lock);
+
+	/* put by message_queue_work */
+	msg->service = vs_get_service(service);
+	list_add_tail(&msg->list, &client->message_queue);
+
+	mutex_unlock(&client->message_queue_lock);
+	queue_work(client->service->work_queue, &client->message_queue_work);
+
+	return 0;
+}
+
+static int vs_core_client_tx_ready(struct vs_client_core_state *state)
+{
+	struct core_client *client = to_core_client(state);
+
+	queue_work(client->service->work_queue, &client->message_queue_work);
+
+	return 0;
+}
+
+static void message_queue_work(struct work_struct *work)
+{
+	struct core_client *client = container_of(work, struct core_client,
+			message_queue_work);
+	struct vs_session_device *session =
+		vs_service_get_session(client->service);
+	struct pending_reset *msg;
+	int err;
+
+	vs_service_state_lock(client->service);
+	if (!VSERVICE_CORE_STATE_IS_CONNECTED(client->state.state.core)) {
+		vs_service_state_unlock(client->service);
+		return;
+	}
+
+	vs_dev_debug(VS_DEBUG_CLIENT, session, &session->dev, "tx_ready\n");
+
+	mutex_lock(&client->message_queue_lock);
+	while (!list_empty(&client->message_queue)) {
+		msg = list_first_entry(&client->message_queue,
+				struct pending_reset, list);
+
+		err = vs_client_core_send_service_reset(client, msg->service);
+
+		/* If we're out of quota there's no point continuing */
+		if (err == -ENOBUFS)
+			break;
+
+		/* Any other error is fatal */
+		if (err < 0) {
+			dev_err(&client->service->dev,
+					"Failed to send pending reset for %d (%d) - resetting session",
+					msg->service->id, err);
+			vs_service_reset_nosync(client->service);
+			break;
+		}
+
+		/*
+		 * The message sent successfully - remove it from the queue.
+		 * The corresponding vs_get_service() was done when the pending
+		 * message was enqueued.
+		 */
+		vs_put_service(msg->service);
+		list_del(&msg->list);
+		kfree(msg);
+	}
+	mutex_unlock(&client->message_queue_lock);
+	vs_service_state_unlock(client->service);
+}
+
+static int
+vs_client_core_handle_server_ready(struct vs_client_core_state *state,
+		u32 service_id, u32 in_quota, u32 out_quota, u32 in_bit_offset,
+		u32 in_num_bits, u32 out_bit_offset, u32 out_num_bits)
+{
+	struct core_client *client = to_core_client(state);
+	struct vs_session_device *session;
+	struct vs_service_device *service;
+	int ret;
+
+	if (service_id == 0)
+		return -EPROTO;
+
+	if (!in_quota || !out_quota)
+		return -EINVAL;
+
+	session = vs_service_get_session(client->service);
+	service = vs_session_get_service(session, service_id);
+	if (!service)
+		return -EINVAL;
+
+	service->send_quota = in_quota;
+	service->recv_quota = out_quota;
+	service->notify_send_offset = in_bit_offset;
+	service->notify_send_bits = in_num_bits;
+	service->notify_recv_offset = out_bit_offset;
+	service->notify_recv_bits = out_num_bits;
+
+	ret = vs_service_enable(service);
+	vs_put_service(service);
+	return ret;
+}
+
+static int
+vs_client_core_handle_service_reset(struct vs_client_core_state *state,
+		u32 service_id)
+{
+	struct core_client *client = to_core_client(state);
+	struct vs_session_device *session;
+
+	if (service_id == 0)
+		return -EPROTO;
+
+	session = vs_service_get_session(client->service);
+
+	return vs_service_handle_reset(session, service_id, true);
+}
+
+static void vs_core_client_start(struct vs_client_core_state *state)
+{
+	struct core_client *client = to_core_client(state);
+	struct vs_session_device *session =
+			vs_service_get_session(client->service);
+
+	/* FIXME - start callback should return int */
+	vs_dev_debug(VS_DEBUG_CLIENT_CORE, session, &client->service->dev,
+			"Core client start\n");
+}
+
+static void vs_core_client_reset(struct vs_client_core_state *state)
+{
+	struct core_client *client = to_core_client(state);
+	struct vs_session_device *session =
+		vs_service_get_session(client->service);
+	struct pending_reset *msg;
+
+	/* Flush pending resets, dropping the service ref each entry holds; NOTE(review): list walked without message_queue_lock - assumes no concurrent queuing during reset, confirm */
+	while (!list_empty(&client->message_queue)) {
+		msg = list_first_entry(&client->message_queue,
+				struct pending_reset, list);
+		vs_put_service(msg->service);
+		list_del(&msg->list);
+		kfree(msg);
+	}
+
+	vs_session_delete_noncore(session);
+
+	/* Drop back to the minimal initial quotas (as set in vs_core_client_alloc) until the server's next startup message */
+	client->service->send_quota = 0;
+	client->service->recv_quota = 1;
+}
+
+static int vs_core_client_startup(struct vs_client_core_state *state,
+		u32 core_in_quota, u32 core_out_quota)
+{
+	struct core_client *client = to_core_client(state);
+	struct vs_service_device *service = state->service;
+	struct vs_session_device *session = vs_service_get_session(service);
+	int ret;
+
+	if (!core_in_quota || !core_out_quota)
+		return -EINVAL;
+
+	/*
+	 * Update the service struct with our real quotas and tell the
+	 * transport about the change
+	 */
+
+	service->send_quota = core_in_quota;
+	service->recv_quota = core_out_quota;
+	ret = session->transport->vt->service_start(session->transport, service);
+	if (ret < 0)
+		return ret;
+
+	WARN_ON(!list_empty(&client->message_queue));
+
+	return vs_client_core_core_req_connect(state, GFP_KERNEL);
+}
+
+static struct vs_client_core_state *
+vs_core_client_alloc(struct vs_service_device *service)
+{
+	struct core_client *client;
+	int err;
+
+	client = kzalloc(sizeof(*client), GFP_KERNEL);
+	if (!client)
+		goto fail;
+
+	client->service = service;
+	INIT_LIST_HEAD(&client->message_queue);
+	INIT_WORK(&client->message_queue_work, message_queue_work);
+	mutex_init(&client->message_queue_lock);
+
+	err = sysfs_create_group(&service->dev.kobj, &client_core_attr_group);
+	if (err)
+		goto fail_free_client;
+
+	/*
+	 * Default transport resources for the core service client. The
+	 * server will inform us of the real quotas in the startup message.
+	 * Note that it is important that the quotas never decrease, so these
+	 * numbers are as small as possible.
+	 */
+	service->send_quota = 0;
+	service->recv_quota = 1;
+	service->notify_send_bits = 0;
+	service->notify_send_offset = 0;
+	service->notify_recv_bits = 0;
+	service->notify_recv_offset = 0;
+
+	return &client->state;
+
+fail_free_client:
+	kfree(client);
+fail:
+	return NULL;
+}
+
+static void vs_core_client_release(struct vs_client_core_state *state)
+{
+	struct core_client *client = to_core_client(state);
+
+	sysfs_remove_group(&client->service->dev.kobj, &client_core_attr_group);
+	kfree(client);
+}
+
+static struct vs_client_core vs_core_client_driver = {
+	.alloc		= vs_core_client_alloc,
+	.release	= vs_core_client_release,
+	.start		= vs_core_client_start,
+	.reset		= vs_core_client_reset,
+	.tx_ready	= vs_core_client_tx_ready,
+
+	.core = {
+		.nack_connect		= vs_client_core_fatal_error,
+
+		/* FIXME: Jira ticket SDK-3074 - ryanm. */
+		.ack_disconnect		= vs_client_core_fatal_error,
+		.nack_disconnect	= vs_client_core_fatal_error,
+
+		.msg_service_created	= vs_client_core_handle_service_created,
+		.msg_service_removed	= vs_client_core_handle_service_removed,
+
+		.msg_startup		= vs_core_client_startup,
+		/* FIXME: Jira ticket SDK-3074 - philipd. */
+		.msg_shutdown		= vs_client_core_fatal_error,
+		.msg_server_ready	= vs_client_core_handle_server_ready,
+		.msg_service_reset	= vs_client_core_handle_service_reset,
+	},
+};
+
+/*
+ * Client bus driver
+ */
+static int vs_client_bus_match(struct device *dev, struct device_driver *driver)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+	struct vs_service_driver *vsdrv = to_vs_service_driver(driver);
+
+	/* Don't match anything to the devio driver; it's bound manually */
+	if (!vsdrv->protocol)
+		return 0;
+
+	WARN_ON_ONCE(service->is_server || vsdrv->is_server);
+
+	/* Match if the protocol strings are the same */
+	if (strcmp(service->protocol, vsdrv->protocol) == 0)
+		return 1;
+
+	return 0;
+}
+
+static ssize_t is_server_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", service->is_server);
+}
+
+static ssize_t id_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", service->id);
+}
+
+static ssize_t dev_protocol_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n", service->protocol ?: "");
+}
+
+static ssize_t service_name_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n", service->name);
+}
+
+static ssize_t quota_in_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", service->send_quota);
+}
+
+static ssize_t quota_out_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", service->recv_quota);
+}
+
+static struct device_attribute vs_client_dev_attrs[] = {
+	__ATTR_RO(id),
+	__ATTR_RO(is_server),
+	__ATTR(protocol, S_IRUGO, dev_protocol_show, NULL),
+	__ATTR_RO(service_name),
+	__ATTR_RO(quota_in),
+	__ATTR_RO(quota_out),
+	__ATTR_NULL
+};
+
+static ssize_t protocol_show(struct device_driver *drv, char *buf)
+{
+	struct vs_service_driver *driver = to_vs_service_driver(drv);
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n", driver->protocol);
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)
+static struct driver_attribute vs_client_drv_attrs[] = {
+	__ATTR_RO(protocol),
+	__ATTR_NULL
+};
+#else
+static DRIVER_ATTR_RO(protocol);
+
+static struct attribute *vs_client_drv_attrs[] = {
+	&driver_attr_protocol.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(vs_client_drv);
+#endif
+
+struct bus_type vs_client_bus_type = {
+	.name		= "vservices-client",
+	.dev_attrs	= vs_client_dev_attrs,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)
+	.drv_attrs	= vs_client_drv_attrs,
+#else
+	.drv_groups	= vs_client_drv_groups,
+#endif
+	.match		= vs_client_bus_match,
+	.probe		= vs_service_bus_probe,
+	.remove		= vs_service_bus_remove,
+	.uevent		= vs_service_bus_uevent,
+};
+EXPORT_SYMBOL(vs_client_bus_type);
+
+/*
+ * Client session driver
+ */
+static int vs_client_session_probe(struct device *dev)
+{
+	struct vs_session_device *session = to_vs_session_device(dev);
+	struct vs_service_device *service;
+	char *protocol, *name;
+	int ret = 0;
+
+	if (session->is_server) {
+		ret = -ENODEV;
+		goto fail;
+	}
+
+	/* create a service for the core protocol client */
+	protocol = kstrdup(VSERVICE_CORE_PROTOCOL_NAME, GFP_KERNEL);
+	if (!protocol) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	name = kstrdup("core", GFP_KERNEL);
+	if (!name) {
+		ret = -ENOMEM;
+		goto fail_free_protocol;
+	}
+
+	service = vs_service_register(session, NULL, 0, protocol, name, NULL);
+	if (IS_ERR(service)) {
+		ret = PTR_ERR(service);
+		goto fail_free_name;
+	}
+
+fail_free_name:
+	kfree(name);
+fail_free_protocol:
+	kfree(protocol);
+fail:
+	return ret;
+}
+
+static int
+vs_client_session_send_service_reset(struct vs_session_device *session,
+		struct vs_service_device *service)
+{
+	if (WARN_ON(service->id == 0))
+		return -EINVAL;
+
+	return vs_client_core_queue_service_reset(session, service);
+}
+
+static struct vs_session_driver vs_client_session_driver = {
+	.driver	= {
+		.name			= "vservices-client-session",
+		.owner			= THIS_MODULE,
+		.bus			= &vs_session_bus_type,
+		.probe			= vs_client_session_probe,
+		.suppress_bind_attrs	= true,
+	},
+	.is_server		= false,
+	.service_bus		= &vs_client_bus_type,
+	.service_local_reset	= vs_client_session_send_service_reset,
+};
+
+static int __init vs_core_client_init(void)
+{
+	int ret;
+
+	ret = bus_register(&vs_client_bus_type);
+	if (ret)
+		goto fail_bus_register;
+
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+	vs_devio_client_driver.driver.bus = &vs_client_bus_type;
+	vs_devio_client_driver.driver.owner = THIS_MODULE;
+	ret = driver_register(&vs_devio_client_driver.driver);
+	if (ret)
+		goto fail_devio_register;
+#endif
+
+	ret = driver_register(&vs_client_session_driver.driver);
+	if (ret)
+		goto fail_driver_register;
+
+	ret = vservice_core_client_register(&vs_core_client_driver,
+			"vs_core_client");
+	if (ret)
+		goto fail_core_register;
+
+	vservices_client_root = kobject_create_and_add("client-sessions",
+			vservices_root);
+	if (!vservices_client_root) {
+		ret = -ENOMEM;
+		goto fail_create_root;
+	}
+
+	return 0;
+
+fail_create_root:
+	vservice_core_client_unregister(&vs_core_client_driver);
+fail_core_register:
+	driver_unregister(&vs_client_session_driver.driver);
+fail_driver_register:
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+	driver_unregister(&vs_devio_client_driver.driver);
+	vs_devio_client_driver.driver.bus = NULL;
+	vs_devio_client_driver.driver.owner = NULL;
+fail_devio_register:
+#endif
+	bus_unregister(&vs_client_bus_type);
+fail_bus_register:
+	return ret;
+}
+
+static void __exit vs_core_client_exit(void)
+{
+	kobject_put(vservices_client_root);
+	vservice_core_client_unregister(&vs_core_client_driver);
+	driver_unregister(&vs_client_session_driver.driver);
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+	driver_unregister(&vs_devio_client_driver.driver);
+	vs_devio_client_driver.driver.bus = NULL;
+	vs_devio_client_driver.driver.owner = NULL;
+#endif
+	bus_unregister(&vs_client_bus_type);
+}
+
+subsys_initcall(vs_core_client_init);
+module_exit(vs_core_client_exit);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Core Client Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/core_server.c b/drivers/vservices/core_server.c
new file mode 100644
index 0000000..76ca83c
--- /dev/null
+++ b/drivers/vservices/core_server.c
@@ -0,0 +1,1651 @@
+/*
+ * drivers/vservices/core_server.c
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Server side core service application driver
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/ctype.h>
+
+#include <vservices/types.h>
+#include <vservices/transport.h>
+#include <vservices/session.h>
+#include <vservices/buffer.h>
+#include <vservices/service.h>
+
+#include <vservices/protocol/core/types.h>
+#include <vservices/protocol/core/common.h>
+#include <vservices/protocol/core/server.h>
+
+#include "transport.h"
+#include "session.h"
+#include "compat.h"
+
+#define VSERVICE_CORE_SERVICE_NAME	"core"
+
+/* Server-side state for the core protocol service (service id 0). */
+struct core_server {
+	struct vs_server_core_state	state;	/* generated protocol state */
+	struct vs_service_device	*service; /* the core service device */
+
+	/*
+	 * A list of messages to send, a mutex protecting it, and a
+	 * work item to process the list.
+	 */
+	struct list_head		message_queue;
+	struct mutex			message_queue_lock;
+	struct work_struct		message_queue_work;
+
+	struct mutex			alloc_lock;
+
+	/* The following are all protected by alloc_lock. */
+	unsigned long			*in_notify_map;	/* rx notify bit bitmap */
+	int				in_notify_map_bits;
+
+	unsigned long			*out_notify_map; /* tx notify bit bitmap */
+	int				out_notify_map_bits;
+
+	unsigned			in_quota_remaining;
+	unsigned			out_quota_remaining;
+};
+
+/*
+ * Used for message deferral when the core service is over quota.
+ * Each entry holds a reference on @service, dropped when the message
+ * is sent or the queue is flushed/cancelled.
+ */
+struct pending_message {
+	vservice_core_message_id_t		type;
+	struct vs_service_device		*service;
+	struct list_head			list;
+};
+
+/* Recover the core_server from its embedded protocol state / device drvdata. */
+#define to_core_server(x)	container_of(x, struct core_server, state)
+#define dev_to_core_server(x)	to_core_server(dev_get_drvdata(x))
+
+/* Return the session that the core server's service device belongs to. */
+static struct vs_session_device *
+vs_core_server_session(struct core_server *server)
+{
+	return vs_service_get_session(server->service);
+}
+
+/*
+ * Return the core server state for @session, or NULL if the session
+ * currently has no core service.
+ */
+static struct core_server *
+vs_server_session_core_server(struct vs_session_device *session)
+{
+	struct vs_service_device *core_service = session->core_service;
+
+	if (!core_service)
+		return NULL;
+
+	return dev_to_core_server(&core_service->dev);
+}
+
+/* Send a service_removed message for @service on the core channel. */
+static int vs_server_core_send_service_removed(struct core_server *server,
+		struct vs_service_device *service)
+{
+	return vs_server_core_core_send_service_removed(&server->state,
+			service->id, GFP_KERNEL);
+}
+
+/*
+ * Remove a not-yet-sent service_created message for @service from the
+ * pending queue, dropping the queue's reference on the service.
+ *
+ * Returns true if a message was found and cancelled. The caller must
+ * hold message_queue_lock.
+ */
+static bool
+cancel_pending_created(struct core_server *server,
+		struct vs_service_device *service)
+{
+	struct pending_message *msg;
+
+	list_for_each_entry(msg, &server->message_queue, list) {
+		if (msg->type == VSERVICE_CORE_CORE_MSG_SERVICE_CREATED &&
+				msg->service == service) {
+			vs_put_service(msg->service);
+			list_del(&msg->list);
+			kfree(msg);
+
+			/* there can only be one */
+			return true;
+		}
+	}
+
+	return false;
+}
+
+/*
+ * Queue a service_removed message for @service, to be sent later by
+ * message_queue_work(). If a service_created for the same service is
+ * still pending, both messages are elided instead. Returns 0 when the
+ * message was queued or correctly suppressed, -ENOMEM on allocation
+ * failure.
+ */
+static int vs_server_core_queue_service_removed(struct core_server *server,
+		struct vs_service_device *service)
+{
+	struct pending_message *msg;
+
+	lockdep_assert_held(&service->ready_lock);
+
+	mutex_lock(&server->message_queue_lock);
+
+	/*
+	 * If we haven't sent the notification that the service was created,
+	 * nuke it and do nothing else.
+	 *
+	 * This is not just an optimisation; see below.
+	 */
+	if (cancel_pending_created(server, service)) {
+		mutex_unlock(&server->message_queue_lock);
+		return 0;
+	}
+
+	/*
+	 * Do nothing if the core state is not connected. We must avoid
+	 * queueing service_removed messages on a reset service.
+	 *
+	 * Note that we cannot take the core server state lock here, because
+	 * we may (or may not) have been called from a core service message
+	 * handler. Thus, we must beware of races with changes to this
+	 * condition:
+	 *
+	 * - It becomes true when the req_connect handler sends an
+	 *   ack_connect, *after* it queues service_created for each existing
+	 *   service (while holding the service ready lock). The handler sends
+	 *   ack_connect with the message queue lock held.
+	 *
+	 *   - If we see the service as connected, then the req_connect
+	 *     handler has already queued and sent a service_created for this
+	 *     service, so it's ok for us to send a service_removed.
+	 *
+	 *   - If we see it as disconnected, the req_connect handler hasn't
+	 *     taken the message queue lock to send ack_connect yet, and thus
+	 *     has not released the service state lock; so if it queued a
+	 *     service_created we caught it in the flush above before it was
+	 *     sent.
+	 *
+	 * - It becomes false before the reset / disconnect handlers are
+	 *   called and those will both flush the message queue afterwards.
+	 *
+	 *   - If we see the service as connected, then the reset / disconnect
+	 *     handler is going to flush the message.
+	 *
+	 *   - If we see it disconnected, the state change has occurred and
+	 *     implicitly had the same effect as this message, so doing
+	 *     nothing is correct.
+	 *
+	 * Note that ordering in all of the above cases is guaranteed by the
+	 * message queue lock.
+	 */
+	if (!VSERVICE_CORE_STATE_IS_CONNECTED(server->state.state.core)) {
+		mutex_unlock(&server->message_queue_lock);
+		return 0;
+	}
+
+	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+	if (!msg) {
+		mutex_unlock(&server->message_queue_lock);
+		return -ENOMEM;
+	}
+
+	msg->type = VSERVICE_CORE_CORE_MSG_SERVICE_REMOVED;
+	/* put by message_queue_work */
+	msg->service = vs_get_service(service);
+
+	list_add_tail(&msg->list, &server->message_queue);
+
+	mutex_unlock(&server->message_queue_lock);
+	queue_work(server->service->work_queue, &server->message_queue_work);
+
+	return 0;
+}
+
+/*
+ * Build and send a service_created message carrying @service's id,
+ * name and protocol. Returns 0 on success, -EINVAL if the name or
+ * protocol does not fit the wire format, or a send/alloc error.
+ */
+static int vs_server_core_send_service_created(struct core_server *server,
+		struct vs_service_device *service)
+{
+	struct vs_session_device *session =
+			vs_service_get_session(server->service);
+
+	struct vs_mbuf *mbuf;
+	struct vs_string service_name, protocol_name;
+	size_t service_name_len, protocol_name_len;
+
+	int err;
+
+	mbuf = vs_server_core_core_alloc_service_created(&server->state,
+			&service_name, &protocol_name, GFP_KERNEL);
+
+	if (IS_ERR(mbuf))
+		return PTR_ERR(mbuf);
+
+	vs_dev_debug(VS_DEBUG_SERVER, session, &session->dev,
+			"Sending service created message for %d (%s:%s)\n",
+			service->id, service->name, service->protocol);
+
+	service_name_len = strlen(service->name);
+	protocol_name_len = strlen(service->protocol);
+
+	/* Both strings must fit in the fixed-size wire fields. */
+	if (service_name_len > vs_string_max_size(&service_name) ||
+			protocol_name_len > vs_string_max_size(&protocol_name)) {
+		dev_err(&session->dev,
+				"Invalid name/protocol for service %d (%s:%s)\n",
+				service->id, service->name,
+				service->protocol);
+		err = -EINVAL;
+		goto fail;
+	}
+
+	vs_string_copyin(&service_name, service->name);
+	vs_string_copyin(&protocol_name, service->protocol);
+
+	err = vs_server_core_core_send_service_created(&server->state,
+			service->id, service_name, protocol_name, mbuf);
+	if (err) {
+		dev_err(&session->dev,
+				"Fatal error sending service creation message for %d (%s:%s): %d\n",
+				service->id, service->name,
+				service->protocol, err);
+		goto fail;
+	}
+
+	return 0;
+
+fail:
+	/* On failure we still own the mbuf and must free it ourselves. */
+	vs_server_core_core_free_service_created(&server->state,
+			&service_name, &protocol_name, mbuf);
+
+	return err;
+}
+
+/*
+ * Queue a service_created message for @service, to be sent later by
+ * message_queue_work(). Does nothing if the core state is not
+ * connected. Returns 0 on success, -ENOMEM on allocation failure.
+ */
+static int vs_server_core_queue_service_created(struct core_server *server,
+		struct vs_service_device *service)
+{
+	struct pending_message *msg;
+
+	lockdep_assert_held(&service->ready_lock);
+	lockdep_assert_held(&server->service->state_mutex);
+
+	mutex_lock(&server->message_queue_lock);
+
+	/*  Do nothing if the core state is disconnected.  */
+	if (!VSERVICE_CORE_STATE_IS_CONNECTED(server->state.state.core)) {
+		mutex_unlock(&server->message_queue_lock);
+		return 0;
+	}
+
+	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+	if (!msg) {
+		mutex_unlock(&server->message_queue_lock);
+		return -ENOMEM;
+	}
+
+	msg->type = VSERVICE_CORE_CORE_MSG_SERVICE_CREATED;
+	/* put by message_queue_work */
+	msg->service = vs_get_service(service);
+
+	list_add_tail(&msg->list, &server->message_queue);
+
+	mutex_unlock(&server->message_queue_lock);
+	queue_work(server->service->work_queue, &server->message_queue_work);
+
+	return 0;
+}
+
+/*
+ * Validate and register a new service on a server session.
+ *
+ * Fails with -ENODEV on client sessions, and -EINVAL if the name is
+ * missing or too long. Service id 0 is reserved for the core service
+ * itself and must use the canonical core name/protocol with no owner.
+ */
+static struct vs_service_device *
+__vs_server_core_register_service(struct vs_session_device *session,
+		vs_service_id_t service_id, struct vs_service_device *owner,
+		const char *name, const char *protocol, const void *plat_data)
+{
+	if (!session->is_server)
+		return ERR_PTR(-ENODEV);
+
+	/*
+	 * NOTE(review): the '\n' test looks like it was meant to be '\0'
+	 * (reject an empty name); as written an empty name passes but a
+	 * name starting with a newline is rejected — confirm the intent.
+	 */
+	if (!name || strnlen(name, VSERVICE_CORE_SERVICE_NAME_SIZE + 1) >
+			VSERVICE_CORE_SERVICE_NAME_SIZE || name[0] == '\n')
+		return ERR_PTR(-EINVAL);
+
+	/* The server core must only be registered as service_id zero */
+	if (service_id == 0 && (owner != NULL ||
+			strcmp(name, VSERVICE_CORE_SERVICE_NAME) != 0 ||
+			strcmp(protocol, VSERVICE_CORE_PROTOCOL_NAME) != 0))
+		return ERR_PTR(-EINVAL);
+
+	return vs_service_register(session, owner, service_id, protocol, name,
+			plat_data);
+}
+
+/*
+ * Register a new service and, if it already has a protocol, start it
+ * (and enable it when the core state is connected). A service created
+ * without a protocol is left for later activation. Returns the new
+ * service device or an ERR_PTR.
+ */
+static struct vs_service_device *
+vs_server_core_create_service(struct core_server *server,
+		struct vs_session_device *session,
+		struct vs_service_device *owner, vs_service_id_t service_id,
+		const char *name, const char *protocol, const void *plat_data)
+{
+	struct vs_service_device *service;
+
+	service = __vs_server_core_register_service(session, service_id,
+			owner, name, protocol, plat_data);
+	if (IS_ERR(service))
+		return service;
+
+	if (protocol) {
+		vs_service_state_lock(server->service);
+		vs_service_start(service);
+		if (VSERVICE_CORE_STATE_IS_CONNECTED(server->state.state.core))
+			vs_service_enable(service);
+		vs_service_state_unlock(server->service);
+	}
+
+	return service;
+}
+
+/*
+ * Send either a service_reset or a server_ready message for @service,
+ * selected by @type. A server_ready message also carries the service's
+ * quota and notification-bit allocations so the client can mirror them.
+ */
+static int
+vs_server_core_send_service_reset_ready(struct core_server *server,
+		vservice_core_message_id_t type,
+		struct vs_service_device *service)
+{
+	bool is_reset = (type == VSERVICE_CORE_CORE_MSG_SERVICE_RESET);
+	struct vs_session_device *session __maybe_unused =
+			vs_service_get_session(server->service);
+	int err;
+
+	vs_dev_debug(VS_DEBUG_SERVER, session, &session->dev,
+			"Sending %s for service %d\n",
+			is_reset ? "reset" : "ready", service->id);
+
+	if (is_reset)
+		err = vs_server_core_core_send_service_reset(&server->state,
+				service->id, GFP_KERNEL);
+	else
+		err = vs_server_core_core_send_server_ready(&server->state,
+				service->id, service->recv_quota,
+				service->send_quota,
+				service->notify_recv_offset,
+				service->notify_recv_bits,
+				service->notify_send_offset,
+				service->notify_send_bits,
+				GFP_KERNEL);
+
+	return err;
+}
+
+/*
+ * Remove a not-yet-sent server_ready message for @service from the
+ * pending queue, dropping the queue's reference on the service.
+ *
+ * Returns true if a message was found and cancelled. The caller must
+ * hold message_queue_lock.
+ */
+static bool
+cancel_pending_ready(struct core_server *server,
+		struct vs_service_device *service)
+{
+	struct pending_message *msg;
+
+	list_for_each_entry(msg, &server->message_queue, list) {
+		if (msg->type == VSERVICE_CORE_CORE_MSG_SERVER_READY &&
+				msg->service == service) {
+			vs_put_service(msg->service);
+			list_del(&msg->list);
+			kfree(msg);
+
+			/* there can only be one */
+			return true;
+		}
+	}
+
+	return false;
+}
+
+/*
+ * Queue a service_reset or server_ready message (selected by @type)
+ * for @service. Returns 0 on success, VS_SERVICE_ALREADY_RESET when a
+ * reset cancels a still-pending ready instead of being queued, or
+ * -ENOMEM on allocation failure.
+ */
+static int
+vs_server_core_queue_service_reset_ready(struct core_server *server,
+		vservice_core_message_id_t type,
+		struct vs_service_device *service)
+{
+	bool is_reset = (type == VSERVICE_CORE_CORE_MSG_SERVICE_RESET);
+	struct pending_message *msg;
+
+	mutex_lock(&server->message_queue_lock);
+
+	/*
+	 * If this is a reset, and there is an outgoing ready in the
+	 * queue, we must cancel it so it can't be sent with invalid
+	 * transport resources, and then return immediately so we
+	 * don't send a redundant reset.
+	 */
+	if (is_reset && cancel_pending_ready(server, service)) {
+		mutex_unlock(&server->message_queue_lock);
+		return VS_SERVICE_ALREADY_RESET;
+	}
+
+	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+	if (!msg) {
+		mutex_unlock(&server->message_queue_lock);
+		return -ENOMEM;
+	}
+
+	msg->type = type;
+	/* put by message_queue_work */
+	msg->service = vs_get_service(service);
+	list_add_tail(&msg->list, &server->message_queue);
+
+	mutex_unlock(&server->message_queue_lock);
+	queue_work(server->service->work_queue, &server->message_queue_work);
+
+	return 0;
+}
+
+/*
+ * Transport tx-ready callback: quota is available again, so kick the
+ * work item that drains the pending message queue.
+ */
+static int vs_core_server_tx_ready(struct vs_server_core_state *state)
+{
+	struct core_server *server = to_core_server(state);
+	struct vs_session_device *session __maybe_unused =
+			vs_service_get_session(server->service);
+
+	vs_dev_debug(VS_DEBUG_SERVER, session, &session->dev, "tx_ready\n");
+
+	queue_work(server->service->work_queue, &server->message_queue_work);
+
+	return 0;
+}
+
+/*
+ * Deferred sender for queued core-protocol messages.
+ *
+ * Runs on the core service's workqueue. Sends pending messages in FIFO
+ * order while the core state is connected. Stops on -ENOBUFS (out of
+ * quota; tx_ready will requeue this work) and resets the session on any
+ * other send failure.
+ */
+static void message_queue_work(struct work_struct *work)
+{
+	struct core_server *server = container_of(work, struct core_server,
+			message_queue_work);
+	struct pending_message *msg;
+	int err;
+
+	vs_service_state_lock(server->service);
+
+	/* Not connected: a later connect will re-queue what is needed. */
+	if (!VSERVICE_CORE_STATE_IS_CONNECTED(server->state.state.core)) {
+		vs_service_state_unlock(server->service);
+		return;
+	}
+
+	/*
+	 * If any pending message fails we exit the loop immediately so that
+	 * we preserve the message order.
+	 */
+	mutex_lock(&server->message_queue_lock);
+	while (!list_empty(&server->message_queue)) {
+		msg = list_first_entry(&server->message_queue,
+				struct pending_message, list);
+
+		switch (msg->type) {
+		case VSERVICE_CORE_CORE_MSG_SERVICE_CREATED:
+			err = vs_server_core_send_service_created(server,
+					msg->service);
+			break;
+
+		case VSERVICE_CORE_CORE_MSG_SERVICE_REMOVED:
+			err = vs_server_core_send_service_removed(server,
+					msg->service);
+			break;
+
+		case VSERVICE_CORE_CORE_MSG_SERVICE_RESET:
+		case VSERVICE_CORE_CORE_MSG_SERVER_READY:
+			err = vs_server_core_send_service_reset_ready(
+					server, msg->type, msg->service);
+			break;
+
+		default:
+			dev_warn(&server->service->dev,
+					"Don't know how to handle pending message type %d\n",
+					msg->type);
+			err = 0;
+			break;
+		}
+
+		/*
+		 * If we're out of quota we exit and wait for tx_ready to
+		 * queue us again.
+		 */
+		if (err == -ENOBUFS)
+			break;
+
+		/* Any other error is fatal */
+		if (err < 0) {
+			dev_err(&server->service->dev,
+					"Failed to send pending message type %d: %d - resetting session",
+					msg->type, err);
+			vs_service_reset_nosync(server->service);
+			break;
+		}
+
+		/*
+		 * The message sent successfully - remove it from the
+		 * queue. The corresponding vs_get_service() was done
+		 * when the pending message was created.
+		 */
+		vs_put_service(msg->service);
+		list_del(&msg->list);
+		kfree(msg);
+	}
+	mutex_unlock(&server->message_queue_lock);
+
+	vs_service_state_unlock(server->service);
+
+	return;
+}
+
+/*
+ * Core server sysfs interface
+ */
+/*
+ * sysfs store: writing a service name creates a new service without a
+ * protocol. The service appears in sysfs but does not bind to a driver
+ * until a protocol name is written to its own protocol attribute.
+ * Returns the byte count consumed, or a negative errno.
+ */
+static ssize_t server_core_create_service_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+	struct vs_session_device *session = to_vs_session_device(dev->parent);
+	struct core_server *server = dev_to_core_server(&service->dev);
+	struct vs_service_device *new_service;
+	char *p;
+	ssize_t ret = count;
+
+	/* FIXME - Buffer sizes are not defined in generated headers */
+	/* discard leading whitespace */
+	while (count && isspace(*buf)) {
+		buf++;
+		count--;
+	}
+	if (!count) {
+		dev_info(dev, "empty service name");
+		return -EINVAL;
+	}
+	/* discard trailing whitespace */
+	while (count && isspace(buf[count - 1]))
+		count--;
+
+	if (count > VSERVICE_CORE_SERVICE_NAME_SIZE) {
+		dev_info(dev, "service name too long (max %d)\n",
+				VSERVICE_CORE_SERVICE_NAME_SIZE);
+		return -EINVAL;
+	}
+
+	p = kstrndup(buf, count, GFP_KERNEL);
+	if (!p)
+		return -ENOMEM;
+
+	/*
+	 * Writing a service name to this file creates a new service. The
+	 * service is created without a protocol. It will appear in sysfs
+	 * but will not be bound to a driver until a valid protocol name
+	 * has been written to the created devices protocol sysfs attribute.
+	 */
+	new_service = vs_server_core_create_service(server, session, service,
+			VS_SERVICE_AUTO_ALLOCATE_ID, p, NULL, NULL);
+	if (IS_ERR(new_service))
+		ret = PTR_ERR(new_service);
+
+	kfree(p);
+
+	return ret;
+}
+
+/*
+ * sysfs store: writing a numeric service id resets that service.
+ * Returns the byte count on success, or a negative errno.
+ */
+static ssize_t server_core_reset_service_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct vs_service_device *core_service = to_vs_service_device(dev);
+	struct vs_session_device *session =
+		vs_service_get_session(core_service);
+	struct vs_service_device *target;
+	vs_service_id_t service_id;
+	unsigned long val;
+	int err;
+
+	/*
+	 * Writing a valid service_id to this file does a reset of that service
+	 */
+	err = kstrtoul(buf, 0, &val);
+	if (err)
+		return err;
+
+	service_id = val;
+	target = vs_session_get_service(session, service_id);
+	if (!target)
+		return -EINVAL;
+
+	err = vs_service_reset(target, core_service);
+
+	/* Drop the reference taken by vs_session_get_service(). */
+	vs_put_service(target);
+	return err < 0 ? err : count;
+}
+
+/*
+ * sysfs store: writing a numeric service id deletes that service.
+ * Service id 0 (the core service itself) is rejected. Returns the byte
+ * count on success, or a negative errno.
+ */
+static ssize_t server_core_remove_service_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+	struct vs_session_device *session = vs_service_get_session(service);
+	struct vs_service_device *target;
+	vs_service_id_t service_id;
+	unsigned long val;
+	int err;
+
+	err = kstrtoul(buf, 0, &val);
+	if (err)
+		return err;
+
+	service_id = val;
+	if (service_id == 0) {
+		/*
+		 * We don't allow removing the core service this way. The
+		 * core service will be removed when the session is removed.
+		 */
+		return -EINVAL;
+	}
+
+	target = vs_session_get_service(session, service_id);
+	if (!target)
+		return -EINVAL;
+
+	err = vs_service_delete(target, service);
+
+	/* Drop the reference taken by vs_session_get_service(). */
+	vs_put_service(target);
+	return err < 0 ? err : count;
+}
+
+/* Write-only (owner) control attributes exposed on the core service device. */
+static DEVICE_ATTR(create_service, S_IWUSR,
+		NULL, server_core_create_service_store);
+static DEVICE_ATTR(reset_service, S_IWUSR,
+		NULL, server_core_reset_service_store);
+static DEVICE_ATTR(remove_service, S_IWUSR,
+		NULL, server_core_remove_service_store);
+
+static struct attribute *server_core_dev_attrs[] = {
+	&dev_attr_create_service.attr,
+	&dev_attr_reset_service.attr,
+	&dev_attr_remove_service.attr,
+	NULL,
+};
+
+static const struct attribute_group server_core_attr_group = {
+	.attrs = server_core_dev_attrs,
+};
+
+/*
+ * Initialise the core server's resource pools from the transport's
+ * limits: remaining in/out quota counters and zeroed bitmaps for the
+ * in/out notification bits. Returns 0 on success, -ENOMEM if a bitmap
+ * allocation fails.
+ */
+static int init_transport_resource_allocation(struct core_server *server)
+{
+	struct vs_session_device *session = vs_core_server_session(server);
+	struct vs_transport *transport = session->transport;
+	size_t size;
+	int err;
+
+	mutex_init(&server->alloc_lock);
+	mutex_lock(&server->alloc_lock);
+
+	transport->vt->get_quota_limits(transport, &server->out_quota_remaining,
+			&server->in_quota_remaining);
+
+	transport->vt->get_notify_bits(transport, &server->out_notify_map_bits,
+			&server->in_notify_map_bits);
+
+	size = BITS_TO_LONGS(server->in_notify_map_bits) *
+			sizeof(unsigned long);
+	server->in_notify_map = kzalloc(size, GFP_KERNEL);
+	/* A NULL map is only an error if there are bits to track. */
+	if (server->in_notify_map_bits && !server->in_notify_map) {
+		err = -ENOMEM;
+		goto fail;
+	}
+
+	size = BITS_TO_LONGS(server->out_notify_map_bits) *
+			sizeof(unsigned long);
+	server->out_notify_map = kzalloc(size, GFP_KERNEL);
+	if (server->out_notify_map_bits && !server->out_notify_map) {
+		err = -ENOMEM;
+		goto fail_free_in_bits;
+	}
+
+	mutex_unlock(&server->alloc_lock);
+
+	return 0;
+
+fail_free_in_bits:
+	kfree(server->in_notify_map);
+fail:
+	mutex_unlock(&server->alloc_lock);
+	return err;
+}
+
+/*
+ * Carve a message quota out of *remaining.
+ *
+ * An explicit @set request is all-or-nothing; otherwise take up to
+ * @best (capped by what is left), falling back to @minimum when no
+ * preference was given. Returns the quota granted (clamped to INT_MAX)
+ * or -ENOSPC if even @minimum cannot be satisfied.
+ */
+static int alloc_quota(unsigned minimum, unsigned best, unsigned set,
+		unsigned *remaining)
+{
+	unsigned quota;
+
+	if (set) {
+		/* Explicitly requested amount: grant exactly it or fail. */
+		if (set > *remaining)
+			return -ENOSPC;
+		quota = set;
+	} else {
+		/* Best-effort request, or the bare minimum by default. */
+		quota = best ? min(best, *remaining) : minimum;
+	}
+
+	if (quota < minimum)
+		return -ENOSPC;
+
+	*remaining -= quota;
+
+	return min_t(unsigned, quota, INT_MAX);
+}
+
+/*
+ * Reserve @notify_count contiguous bits in @map (which holds @nr_bits
+ * bits). Returns the offset of the first reserved bit, 0 for a
+ * zero-sized request, or -ENOSPC when no suitable free run exists.
+ */
+static int alloc_notify_bits(unsigned notify_count, unsigned long *map,
+		unsigned nr_bits)
+{
+	unsigned offset;
+
+	/* Nothing requested: report offset zero without touching the map. */
+	if (!notify_count)
+		return 0;
+
+	offset = bitmap_find_next_zero_area(map, nr_bits, 0, notify_count, 0);
+	/* Also reject offsets that would not survive the int return type. */
+	if (offset >= nr_bits || offset > (unsigned)INT_MAX)
+		return -ENOSPC;
+
+	bitmap_set(map, offset, notify_count);
+
+	return offset;
+}
+
+/*
+ * alloc_transport_resources - Allocates the quotas and notification bits for
+ * a service.
+ * @server: the core service state.
+ * @service: the service device to allocate resources for.
+ *
+ * This function allocates message quotas and notification bits. It is called
+ * for the core service in alloc(), and for every other service by the server
+ * bus probe() function.
+ *
+ * Returns 0 on success; on failure, every allocation made so far is
+ * rolled back and the service's resource fields are zeroed.
+ */
+static int alloc_transport_resources(struct core_server *server,
+		struct vs_service_device *service)
+{
+	struct vs_session_device *session __maybe_unused =
+			vs_service_get_session(service);
+	unsigned in_bit_offset, out_bit_offset;
+	unsigned in_quota, out_quota;
+	int ret;
+	struct vs_service_driver *driver;
+
+	if (WARN_ON(!service->dev.driver))
+		return -ENODEV;
+
+	mutex_lock(&server->alloc_lock);
+
+	driver = to_vs_service_driver(service->dev.driver);
+
+	/* Quota allocations */
+	ret = alloc_quota(driver->in_quota_min, driver->in_quota_best,
+			service->in_quota_set, &server->in_quota_remaining);
+	if (ret < 0) {
+		dev_err(&service->dev, "cannot allocate in quota\n");
+		goto fail_in_quota;
+	}
+	in_quota = ret;
+
+	ret = alloc_quota(driver->out_quota_min, driver->out_quota_best,
+			service->out_quota_set, &server->out_quota_remaining);
+	if (ret < 0) {
+		dev_err(&service->dev, "cannot allocate out quota\n");
+		goto fail_out_quota;
+	}
+	out_quota = ret;
+
+	vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &session->dev,
+			"%d: quota in: %u out: %u; remaining in: %u out: %u\n",
+			service->id, in_quota, out_quota,
+			server->in_quota_remaining,
+			server->out_quota_remaining);
+
+	/* Notification bit allocations */
+	ret = alloc_notify_bits(service->notify_recv_bits,
+			server->in_notify_map, server->in_notify_map_bits);
+	if (ret < 0) {
+		dev_err(&service->dev, "cannot allocate in notify bits\n");
+		goto fail_in_notify;
+	}
+	in_bit_offset = ret;
+
+	ret = alloc_notify_bits(service->notify_send_bits,
+			server->out_notify_map, server->out_notify_map_bits);
+	if (ret < 0) {
+		dev_err(&service->dev, "cannot allocate out notify bits\n");
+		goto fail_out_notify;
+	}
+	out_bit_offset = ret;
+
+	vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &session->dev,
+			"notify bits in: %u/%u out: %u/%u\n",
+			in_bit_offset, service->notify_recv_bits,
+			out_bit_offset, service->notify_send_bits);
+
+	/* Fill in the device's allocations */
+	service->recv_quota = in_quota;
+	service->send_quota = out_quota;
+	service->notify_recv_offset = in_bit_offset;
+	service->notify_send_offset = out_bit_offset;
+
+	mutex_unlock(&server->alloc_lock);
+
+	return 0;
+
+	/* Unwind in strict reverse order of allocation. */
+fail_out_notify:
+	if (service->notify_recv_bits)
+		bitmap_clear(server->in_notify_map,
+				in_bit_offset, service->notify_recv_bits);
+fail_in_notify:
+	server->out_quota_remaining += out_quota;
+fail_out_quota:
+	server->in_quota_remaining += in_quota;
+fail_in_quota:
+
+	mutex_unlock(&server->alloc_lock);
+
+	service->recv_quota = 0;
+	service->send_quota = 0;
+	service->notify_recv_bits = 0;
+	service->notify_recv_offset = 0;
+	service->notify_send_bits = 0;
+	service->notify_send_offset = 0;
+
+	return ret;
+}
+
+/*
+ * free_transport_resources - Frees the quotas and notification bits for
+ * a non-core service.
+ * @server: the core service state.
+ * @service: the service device to free resources for.
+ *
+ * This function is called by the server to free message quotas and
+ * notification bits that were allocated by alloc_transport_resources. It must
+ * only be called when the target service is in reset, and must be called with
+ * the core service's state lock held.
+ */
+static int free_transport_resources(struct core_server *server,
+		struct vs_service_device *service)
+{
+	mutex_lock(&server->alloc_lock);
+
+	if (service->notify_recv_bits)
+		bitmap_clear(server->in_notify_map,
+				service->notify_recv_offset,
+				service->notify_recv_bits);
+
+	if (service->notify_send_bits)
+		bitmap_clear(server->out_notify_map,
+				service->notify_send_offset,
+				service->notify_send_bits);
+
+	/* Return the service's quota to the shared pools. */
+	server->in_quota_remaining += service->recv_quota;
+	server->out_quota_remaining += service->send_quota;
+
+	mutex_unlock(&server->alloc_lock);
+
+	/* Zero the device's allocations so a re-probe starts clean. */
+	service->recv_quota = 0;
+	service->send_quota = 0;
+	service->notify_recv_bits = 0;
+	service->notify_recv_offset = 0;
+	service->notify_send_bits = 0;
+	service->notify_send_offset = 0;
+
+	return 0;
+}
+
+/*
+ * Protocol alloc() callback for the core server (service id 0 only).
+ *
+ * Allocates the core_server state, initialises the pending-message
+ * queue, sets up the transport resource pools, takes the core service's
+ * own resource allocation, and creates the sysfs control attributes.
+ * Returns the embedded protocol state, or NULL on any failure.
+ */
+static struct vs_server_core_state *
+vs_core_server_alloc(struct vs_service_device *service)
+{
+	struct core_server *server;
+	int err;
+
+	if (WARN_ON(service->id != 0))
+		goto fail;
+
+	server = kzalloc(sizeof(*server), GFP_KERNEL);
+	if (!server)
+		goto fail;
+
+	server->service = service;
+	INIT_LIST_HEAD(&server->message_queue);
+	INIT_WORK(&server->message_queue_work, message_queue_work);
+	mutex_init(&server->message_queue_lock);
+
+	err = init_transport_resource_allocation(server);
+	if (err)
+		goto fail_init_alloc;
+
+	err = alloc_transport_resources(server, service);
+	if (err)
+		goto fail_alloc_transport;
+
+	err = sysfs_create_group(&service->dev.kobj, &server_core_attr_group);
+	if (err)
+		goto fail_sysfs;
+
+	return &server->state;
+
+	/* Unwind in reverse order of the setup above. */
+fail_sysfs:
+	free_transport_resources(server, service);
+fail_alloc_transport:
+	kfree(server->out_notify_map);
+	kfree(server->in_notify_map);
+fail_init_alloc:
+	kfree(server);
+fail:
+	return NULL;
+}
+
+/*
+ * Protocol release() callback: removes every non-core service on the
+ * session, then frees the sysfs attributes, bitmaps and server state
+ * allocated by vs_core_server_alloc().
+ */
+static void vs_core_server_release(struct vs_server_core_state *state)
+{
+	struct core_server *server = to_core_server(state);
+	struct vs_session_device *session = vs_core_server_session(server);
+
+	/* Delete all the other services */
+	vs_session_delete_noncore(session);
+
+	sysfs_remove_group(&server->service->dev.kobj, &server_core_attr_group);
+	kfree(server->out_notify_map);
+	kfree(server->in_notify_map);
+	kfree(server);
+}
+
+/**
+ * vs_server_create_service - create and register a new vService server
+ * @session: the session to create the vService server on
+ * @parent: an existing server that is managing the new server
+ * @name: the name of the new service
+ * @protocol: the protocol for the new service
+ * @plat_data: value to be assigned to (struct device *)->platform_data
+ *
+ * Returns the new service device, or NULL if the session is not a
+ * server session, arguments are missing, the core service is absent or
+ * unbound, or registration fails.
+ */
+struct vs_service_device *
+vs_server_create_service(struct vs_session_device *session,
+		struct vs_service_device *parent, const char *name,
+		const char *protocol, const void *plat_data)
+{
+	struct vs_service_device *core_service, *new_service;
+	struct core_server *server;
+
+	if (!session->is_server || !name || !protocol)
+		return NULL;
+
+	core_service = session->core_service;
+	if (!core_service)
+		return NULL;
+
+	/* Hold the device lock so the core driver cannot unbind under us. */
+	device_lock(&core_service->dev);
+	if (!core_service->dev.driver) {
+		device_unlock(&core_service->dev);
+		return NULL;
+	}
+
+	server = dev_to_core_server(&core_service->dev);
+
+	/* Unowned services default to being managed by the core service. */
+	if (!parent)
+		parent = core_service;
+
+	new_service = vs_server_core_create_service(server, session, parent,
+			VS_SERVICE_AUTO_ALLOCATE_ID, name, protocol, plat_data);
+
+	device_unlock(&core_service->dev);
+
+	if (IS_ERR(new_service))
+		return NULL;
+
+	return new_service;
+}
+EXPORT_SYMBOL(vs_server_create_service);
+
+/**
+ * vs_server_destroy_service - destroy and unregister a vService server. This
+ * function must _not_ be used from the target service's own workqueue.
+ * @service: The service to destroy
+ * @parent: The service requesting the deletion; defaults to the
+ *          session's core service when NULL.
+ *
+ * Returns 0 on success or a negative errno; -EINVAL on client sessions
+ * or when asked to destroy the core service (id 0).
+ */
+int vs_server_destroy_service(struct vs_service_device *service,
+		struct vs_service_device *parent)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+
+	if (!session->is_server || service->id == 0)
+		return -EINVAL;
+
+	if (!parent)
+		parent = session->core_service;
+
+	return vs_service_delete(service, parent);
+}
+EXPORT_SYMBOL(vs_server_destroy_service);
+
+/*
+ * Per-service callback for vs_session_for_each_service(); @data is the
+ * core server. Queues a service_created message for @service.
+ */
+static void __queue_service_created(struct vs_service_device *service,
+		void *data)
+{
+	struct core_server *server = data;
+
+	vs_server_core_queue_service_created(server, service);
+}
+
+/*
+ * req_connect handler: acknowledge the connect, queue a
+ * service_created message for every existing service, then re-enable
+ * the non-core services. Returns 0 or the ack send error.
+ */
+static int vs_server_core_handle_connect(struct vs_server_core_state *state)
+{
+	struct core_server *server = to_core_server(state);
+	struct vs_session_device *session = vs_core_server_session(server);
+	int err;
+
+	/* Tell the other end that we've finished connecting. */
+	err = vs_server_core_core_send_ack_connect(state, GFP_KERNEL);
+	if (err)
+		return err;
+
+	/* Queue a service-created message for each existing service. */
+	vs_session_for_each_service(session, __queue_service_created, server);
+
+	/* Re-enable all the services. */
+	vs_session_enable_noncore(session);
+
+	return 0;
+}
+
+/*
+ * Disable every non-core service on the session and discard all queued
+ * core-protocol messages (dropping each queued service reference).
+ * Shared by the disconnect and reset paths.
+ */
+static void vs_core_server_disable_services(struct core_server *server)
+{
+	struct vs_session_device *session = vs_core_server_session(server);
+	struct pending_message *msg;
+
+	/* Disable all the other services */
+	vs_session_disable_noncore(session);
+
+	/* Flush all the pending service-readiness messages */
+	mutex_lock(&server->message_queue_lock);
+	while (!list_empty(&server->message_queue)) {
+		msg = list_first_entry(&server->message_queue,
+				struct pending_message, list);
+		vs_put_service(msg->service);
+		list_del(&msg->list);
+		kfree(msg);
+	}
+	mutex_unlock(&server->message_queue_lock);
+}
+
+/*
+ * req_disconnect handler: quiesce all non-core services and flush the
+ * pending queue, then acknowledge the disconnect.
+ */
+static int vs_server_core_handle_disconnect(struct vs_server_core_state *state)
+{
+	struct core_server *server = to_core_server(state);
+
+	vs_core_server_disable_services(server);
+
+	return vs_server_core_core_send_ack_disconnect(state, GFP_KERNEL);
+}
+
+/*
+ * msg_service_reset handler: forward a client-initiated reset of
+ * @service_id to the session layer. Resetting the core service itself
+ * over its own channel is a protocol violation (-EPROTO).
+ */
+static int
+vs_server_core_handle_service_reset(struct vs_server_core_state *state,
+		unsigned service_id)
+{
+	struct core_server *server = to_core_server(state);
+	struct vs_session_device *session = vs_core_server_session(server);
+
+	if (service_id == 0)
+		return -EPROTO;
+
+	return vs_service_handle_reset(session, service_id, false);
+}
+
+/*
+ * Protocol start() callback: announce the core service's own quotas to
+ * the client with a startup message. Failures are logged only.
+ */
+static void vs_core_server_start(struct vs_server_core_state *state)
+{
+	struct core_server *server = to_core_server(state);
+	struct vs_session_device *session = vs_core_server_session(server);
+	int err;
+
+	vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &server->service->dev,
+			"Core server start\n");
+
+	err = vs_server_core_core_send_startup(&server->state,
+			server->service->recv_quota,
+			server->service->send_quota, GFP_KERNEL);
+
+	if (err)
+		dev_err(&session->dev, "Failed to start core protocol: %d\n",
+				err);
+}
+
+/*
+ * Protocol reset() callback: disable all non-core services and discard
+ * any queued core-protocol messages.
+ */
+static void vs_core_server_reset(struct vs_server_core_state *state)
+{
+	struct core_server *server = to_core_server(state);
+	struct vs_session_device *session = vs_core_server_session(server);
+
+	vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &server->service->dev,
+			"Core server reset\n");
+
+	vs_core_server_disable_services(server);
+}
+
+/* Core protocol server operations; bound to service id 0 on each session. */
+static struct vs_server_core vs_core_server_driver = {
+	.alloc		= vs_core_server_alloc,
+	.release	= vs_core_server_release,
+	.start		= vs_core_server_start,
+	.reset		= vs_core_server_reset,
+	.tx_ready	= vs_core_server_tx_ready,
+	.core = {
+		.req_connect		= vs_server_core_handle_connect,
+		.req_disconnect		= vs_server_core_handle_disconnect,
+		.msg_service_reset	= vs_server_core_handle_service_reset,
+	},
+};
+
+/*
+ * Server bus driver
+ */
+/*
+ * Bus match callback: a driver matches a service when their protocol
+ * strings are equal. The devio driver (no protocol) and services with
+ * no protocol assigned yet never auto-match.
+ */
+static int vs_server_bus_match(struct device *dev, struct device_driver *driver)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+	struct vs_service_driver *vsdrv = to_vs_service_driver(driver);
+
+	/* Don't match anything to the devio driver; it's bound manually */
+	if (!vsdrv->protocol)
+		return 0;
+
+	WARN_ON_ONCE(!service->is_server || !vsdrv->is_server);
+
+	/* Don't match anything that doesn't have a protocol set yet */
+	if (!service->protocol)
+		return 0;
+
+	if (strcmp(service->protocol, vsdrv->protocol) == 0)
+		return 1;
+
+	return 0;
+}
+
+/*
+ * Bus probe callback for the server service bus.  Copies the bound
+ * driver's notify-bit counts into the service (unless the devio driver
+ * already set them via its bind ioctl), allocates transport resources
+ * for non-core services, then delegates to vs_service_bus_probe().
+ */
+static int vs_server_bus_probe(struct device *dev)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+	struct vs_session_device *session = vs_service_get_session(service);
+	struct core_server *server = vs_server_session_core_server(session);
+	int ret;
+
+	/*
+	 * Set the notify counts for the service, unless the driver is the
+	 * devio driver in which case it has already been done by the devio
+	 * bind ioctl. The devio driver cannot be bound automatically.
+	 */
+	struct vs_service_driver *driver =
+		to_vs_service_driver(service->dev.driver);
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+	if (driver != &vs_devio_server_driver)
+#endif
+	{
+		service->notify_recv_bits = driver->in_notify_count;
+		service->notify_send_bits = driver->out_notify_count;
+	}
+
+	/*
+	 * We can't allocate transport resources here for the core service
+	 * because the resource pool doesn't exist yet. It's done in alloc()
+	 * instead (which is called, indirectly, by vs_service_bus_probe()).
+	 */
+	if (service->id == 0)
+		return vs_service_bus_probe(dev);
+
+	if (!server)
+		return -ENODEV;
+	ret = alloc_transport_resources(server, service);
+	if (ret < 0)
+		goto fail;
+
+	ret = vs_service_bus_probe(dev);
+	if (ret < 0)
+		goto fail_free_resources;
+
+	return 0;
+
+fail_free_resources:
+	free_transport_resources(server, service);
+fail:
+	return ret;
+}
+
+/*
+ * Bus remove callback: undo vs_server_bus_probe().  Transport resources
+ * are released only for non-core services, and only while the core
+ * server still exists (see the comment below).
+ */
+static int vs_server_bus_remove(struct device *dev)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+	struct vs_session_device *session = vs_service_get_session(service);
+	struct core_server *server = vs_server_session_core_server(session);
+
+	vs_service_bus_remove(dev);
+
+	/*
+	 * We skip free_transport_resources for the core service because the
+	 * resource pool has already been freed at this point. It's also
+	 * possible that the core service has disappeared, in which case
+	 * there's no work to do here.
+	 */
+	if (server != NULL && service->id != 0)
+		free_transport_resources(server, service);
+
+	return 0;
+}
+
+/* sysfs 'is_server' attribute: 1 if this end of the service is the server. */
+static ssize_t is_server_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", service->is_server);
+}
+
+/* sysfs 'id' attribute: the service's numeric id on its session. */
+static ssize_t id_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", service->id);
+}
+
+/* sysfs 'protocol' attribute: the protocol name, or empty if not set yet. */
+static ssize_t dev_protocol_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n", service->protocol ?: "");
+}
+
+/*
+ * Deferred-work wrapper used by dev_protocol_store() to start a service
+ * outside the sysfs store path; 'service' holds a reference taken at
+ * queue time and dropped in service_enable_work().
+ */
+struct service_enable_work_struct {
+	struct vs_service_device *service;
+	struct work_struct work;
+};
+
+/*
+ * Deferred worker scheduled by dev_protocol_store(): start the newly
+ * named service, enable it if the core protocol is connected, and ask
+ * the bus to bind a driver for its protocol.  Always drops the service
+ * reference taken when the work was queued.
+ */
+static void service_enable_work(struct work_struct *work)
+{
+	struct service_enable_work_struct *enable_work = container_of(work,
+			struct service_enable_work_struct, work);
+	struct vs_service_device *service = enable_work->service;
+	struct vs_session_device *session = vs_service_get_session(service);
+	struct core_server *server = vs_server_session_core_server(session);
+	bool started;
+	int ret;
+
+	kfree(enable_work);
+
+	if (!server) {
+		/*
+		 * The core server has gone away. Drop the reference taken
+		 * when the work was queued; the original code returned here
+		 * without the put, leaking a service reference.
+		 */
+		vs_put_service(service);
+		return;
+	}
+	/* Start and enable the service */
+	vs_service_state_lock(server->service);
+	started = vs_service_start(service);
+	if (!started) {
+		vs_service_state_unlock(server->service);
+		vs_put_service(service);
+		return;
+	}
+
+	if (VSERVICE_CORE_STATE_IS_CONNECTED(server->state.state.core))
+		vs_service_enable(service);
+	vs_service_state_unlock(server->service);
+
+	/* Tell the bus to search for a driver that supports the protocol */
+	ret = device_attach(&service->dev);
+	if (ret == 0)
+		dev_warn(&service->dev, "No driver found for protocol: %s\n",
+				service->protocol);
+	kobject_uevent(&service->dev.kobj, KOBJ_CHANGE);
+
+	/* The corresponding vs_get_service was done when the work was queued */
+	vs_put_service(service);
+}
+
+/*
+ * sysfs 'protocol' store: set the service's protocol (write-once) and
+ * schedule deferred work to start and enable the service.
+ *
+ * NOTE(review): strim()'s return value (which skips leading whitespace)
+ * is discarded so the original kstrdup() pointer can later be freed;
+ * as a result only trailing whitespace is removed from the written
+ * value.  Confirm leading whitespace is not expected here.
+ */
+static ssize_t dev_protocol_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+	struct service_enable_work_struct *enable_work;
+
+	/* The protocol can only be set once */
+	if (service->protocol)
+		return -EPERM;
+
+	/* Registering additional core servers is not allowed */
+	if (strcmp(buf, VSERVICE_CORE_PROTOCOL_NAME) == 0)
+		return -EINVAL;
+
+	if (strnlen(buf, VSERVICE_CORE_PROTOCOL_NAME_SIZE) + 1 >
+			VSERVICE_CORE_PROTOCOL_NAME_SIZE)
+		return -E2BIG;
+
+	enable_work = kmalloc(sizeof(*enable_work), GFP_KERNEL);
+	if (!enable_work)
+		return -ENOMEM;
+
+	/* Set the protocol and tell the client about it */
+	service->protocol = kstrdup(buf, GFP_KERNEL);
+	if (!service->protocol) {
+		kfree(enable_work);
+		return -ENOMEM;
+	}
+	strim(service->protocol);
+
+	/*
+	 * Schedule work to enable the service. We can't do it here because
+	 * we need to take the core service lock, and doing that here makes
+	 * it depend circularly on this sysfs attribute, which can be deleted
+	 * with that lock held.
+	 *
+	 * The corresponding vs_put_service is called in the enable_work
+	 * function.
+	 */
+	INIT_WORK(&enable_work->work, service_enable_work);
+	enable_work->service = vs_get_service(service);
+	schedule_work(&enable_work->work);
+
+	return count;
+}
+
+/* sysfs 'service_name' attribute: the service's name string. */
+static ssize_t service_name_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n", service->name);
+}
+
+/*
+ * sysfs 'quota_in' store: set the requested incoming message quota
+ * (in_quota_set).  Only permitted while no driver is bound; see the
+ * locking comment below for why the alloc lock is used.
+ */
+static ssize_t quota_in_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+	struct vs_session_device *session = vs_service_get_session(service);
+	struct core_server *server = vs_server_session_core_server(session);
+	int ret;
+	unsigned long in_quota;
+
+	if (!server)
+		return -ENODEV;
+	/*
+	 * Don't allow quota to be changed for services that have a driver
+	 * bound. We take the alloc lock here because the device lock is held
+	 * while creating and destroying this sysfs item. This means we can
+	 * race with driver binding, but that doesn't matter: we actually just
+	 * want to know that alloc_transport_resources() hasn't run yet, and
+	 * that takes the alloc lock.
+	 */
+	mutex_lock(&server->alloc_lock);
+	if (service->dev.driver) {
+		ret = -EPERM;
+		goto out;
+	}
+
+	ret = kstrtoul(buf, 0, &in_quota);
+	if (ret < 0)
+		goto out;
+
+	service->in_quota_set = in_quota;
+	ret = count;
+
+out:
+	mutex_unlock(&server->alloc_lock);
+
+	return ret;
+}
+
+/* sysfs 'quota_in' show: reports the service's current recv_quota. */
+static ssize_t quota_in_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n", service->recv_quota);
+}
+
+/*
+ * sysfs 'quota_out' store: set the requested outgoing message quota
+ * (out_quota_set).  Only permitted while no driver is bound.
+ */
+static ssize_t quota_out_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+	struct vs_session_device *session = vs_service_get_session(service);
+	struct core_server *server = vs_server_session_core_server(session);
+	int ret;
+	unsigned long out_quota;
+
+	if (!server)
+		return -ENODEV;
+	/* See comment in quota_in_store. */
+	mutex_lock(&server->alloc_lock);
+	if (service->dev.driver) {
+		ret = -EPERM;
+		goto out;
+	}
+
+	ret = kstrtoul(buf, 0, &out_quota);
+	if (ret < 0)
+		goto out;
+
+	service->out_quota_set = out_quota;
+	ret = count;
+
+out:
+	mutex_unlock(&server->alloc_lock);
+
+	return ret;
+}
+
+/* sysfs 'quota_out' show: reports the service's current send_quota. */
+static ssize_t quota_out_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n", service->send_quota);
+}
+
+/* Per-device sysfs attributes for services on the server bus. */
+static struct device_attribute vs_server_dev_attrs[] = {
+	__ATTR_RO(id),
+	__ATTR_RO(is_server),
+	__ATTR(protocol, S_IRUGO | S_IWUSR,
+			dev_protocol_show, dev_protocol_store),
+	__ATTR_RO(service_name),
+	__ATTR(quota_in, S_IRUGO | S_IWUSR,
+			quota_in_show, quota_in_store),
+	__ATTR(quota_out, S_IRUGO | S_IWUSR,
+			quota_out_show, quota_out_store),
+	__ATTR_NULL
+};
+
+/* sysfs driver attribute: the protocol this service driver implements. */
+static ssize_t protocol_show(struct device_driver *drv, char *buf)
+{
+	struct vs_service_driver *vsdrv = to_vs_service_driver(drv);
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n", vsdrv->protocol);
+}
+
+/*
+ * Driver sysfs attributes: pre-3.14 kernels take a driver_attribute
+ * array (bus_type.drv_attrs); 3.14+ use attribute groups (drv_groups).
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)
+static struct driver_attribute vs_server_drv_attrs[] = {
+	__ATTR_RO(protocol),
+	__ATTR_NULL
+};
+#else
+static DRIVER_ATTR_RO(protocol);
+
+static struct attribute *vs_server_drv_attrs[] = {
+	&driver_attr_protocol.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(vs_server_drv);
+#endif
+
+/* Bus type for server-side service devices. */
+struct bus_type vs_server_bus_type = {
+	.name		= "vservices-server",
+	.dev_attrs	= vs_server_dev_attrs,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)
+	.drv_attrs	= vs_server_drv_attrs,
+#else
+	.drv_groups	= vs_server_drv_groups,
+#endif
+	.match		= vs_server_bus_match,
+	.probe		= vs_server_bus_probe,
+	.remove		= vs_server_bus_remove,
+	.uevent		= vs_service_bus_uevent,
+};
+EXPORT_SYMBOL(vs_server_bus_type);
+
+/*
+ * Server session driver
+ */
+/*
+ * Session probe: register the core service (id 0) on a newly created
+ * server session.
+ */
+static int vs_server_session_probe(struct device *dev)
+{
+	struct vs_session_device *session = to_vs_session_device(dev);
+	struct vs_service_device *service;
+
+	service = __vs_server_core_register_service(session, 0, NULL,
+			VSERVICE_CORE_SERVICE_NAME,
+			VSERVICE_CORE_PROTOCOL_NAME, NULL);
+	if (IS_ERR(service))
+		return PTR_ERR(service);
+
+	return 0;
+}
+
+/* Queue a service_created message for a newly added (non-core) service. */
+static int
+vs_server_session_service_added(struct vs_session_device *session,
+		struct vs_service_device *service)
+{
+	struct core_server *server = vs_server_session_core_server(session);
+	int err;
+
+	if (WARN_ON(!server || !service->id))
+		return -EINVAL;
+
+	err = vs_server_core_queue_service_created(server, service);
+
+	if (err)
+		vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &session->dev,
+				"failed to send service_created: %d\n", err);
+
+	return err;
+}
+
+/* Queue a server_ready message for a (non-core) service that has started. */
+static int
+vs_server_session_service_start(struct vs_session_device *session,
+		struct vs_service_device *service)
+{
+	struct core_server *server = vs_server_session_core_server(session);
+	int err;
+
+	if (WARN_ON(!server || !service->id))
+		return -EINVAL;
+
+	err = vs_server_core_queue_service_reset_ready(server,
+			VSERVICE_CORE_CORE_MSG_SERVER_READY, service);
+
+	if (err)
+		vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &session->dev,
+				"failed to send server_ready: %d\n", err);
+
+	return err;
+}
+
+/* Queue a service_reset message when a (non-core) service resets locally. */
+static int
+vs_server_session_service_local_reset(struct vs_session_device *session,
+		struct vs_service_device *service)
+{
+	struct core_server *server = vs_server_session_core_server(session);
+	int err;
+
+	if (WARN_ON(!server || !service->id))
+		return -EINVAL;
+
+	err = vs_server_core_queue_service_reset_ready(server,
+			VSERVICE_CORE_CORE_MSG_SERVICE_RESET, service);
+
+	if (err)
+		vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &session->dev,
+				"failed to send service_reset: %d\n", err);
+
+	return err;
+}
+
+/*
+ * Queue a service_removed message for a removed (non-core) service.
+ * Tolerates the core server having already vanished (see below).
+ */
+static int
+vs_server_session_service_removed(struct vs_session_device *session,
+		struct vs_service_device *service)
+{
+	struct core_server *server = vs_server_session_core_server(session);
+	int err;
+
+	/*
+	 * It's possible for the core server to be forcibly removed before
+	 * the other services, for example when the underlying transport
+	 * vanishes. If that happens, we can end up here with a NULL core
+	 * server pointer.
+	 */
+	if (!server)
+		return 0;
+
+	if (WARN_ON(!service->id))
+		return -EINVAL;
+
+	err = vs_server_core_queue_service_removed(server, service);
+	if (err)
+		vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &session->dev,
+				"failed to send service_removed: %d\n", err);
+
+	return err;
+}
+
+/*
+ * Server-side session driver: binds to server sessions on the session
+ * bus and forwards service lifecycle events to the core server.
+ */
+static struct vs_session_driver vs_server_session_driver = {
+	.driver	= {
+		.name			= "vservices-server-session",
+		.owner			= THIS_MODULE,
+		.bus			= &vs_session_bus_type,
+		.probe			= vs_server_session_probe,
+		.suppress_bind_attrs	= true,
+	},
+	.is_server		= true,
+	.service_bus		= &vs_server_bus_type,
+	.service_added		= vs_server_session_service_added,
+	.service_start		= vs_server_session_service_start,
+	.service_local_reset	= vs_server_session_service_local_reset,
+	.service_removed	= vs_server_session_service_removed,
+};
+
+/*
+ * Module init: register, in order, the server service bus, the devio
+ * server driver (if CONFIG_VSERVICES_CHAR_DEV), the session driver,
+ * the core protocol server, and the sysfs root for server sessions.
+ * Each failure label unwinds everything registered before it.
+ */
+static int __init vs_core_server_init(void)
+{
+	int ret;
+
+	ret = bus_register(&vs_server_bus_type);
+	if (ret)
+		goto fail_bus_register;
+
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+	/* The devio driver's bus/owner are only known at this point. */
+	vs_devio_server_driver.driver.bus = &vs_server_bus_type;
+	vs_devio_server_driver.driver.owner = THIS_MODULE;
+	ret = driver_register(&vs_devio_server_driver.driver);
+	if (ret)
+		goto fail_devio_register;
+#endif
+
+	ret = driver_register(&vs_server_session_driver.driver);
+	if (ret)
+		goto fail_driver_register;
+
+	ret = vservice_core_server_register(&vs_core_server_driver,
+			"vs_core_server");
+	if (ret)
+		goto fail_core_register;
+
+	vservices_server_root = kobject_create_and_add("server-sessions",
+			vservices_root);
+	if (!vservices_server_root) {
+		ret = -ENOMEM;
+		goto fail_create_root;
+	}
+
+	return 0;
+
+fail_create_root:
+	vservice_core_server_unregister(&vs_core_server_driver);
+fail_core_register:
+	driver_unregister(&vs_server_session_driver.driver);
+fail_driver_register:
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+	driver_unregister(&vs_devio_server_driver.driver);
+	vs_devio_server_driver.driver.bus = NULL;
+	vs_devio_server_driver.driver.owner = NULL;
+fail_devio_register:
+#endif
+	bus_unregister(&vs_server_bus_type);
+fail_bus_register:
+	return ret;
+}
+
+/* Module exit: tear down in the reverse order of vs_core_server_init(). */
+static void __exit vs_core_server_exit(void)
+{
+	kobject_put(vservices_server_root);
+	vservice_core_server_unregister(&vs_core_server_driver);
+	driver_unregister(&vs_server_session_driver.driver);
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+	driver_unregister(&vs_devio_server_driver.driver);
+	vs_devio_server_driver.driver.bus = NULL;
+	vs_devio_server_driver.driver.owner = NULL;
+#endif
+	bus_unregister(&vs_server_bus_type);
+}
+
+subsys_initcall(vs_core_server_init);
+module_exit(vs_core_server_exit);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Core Server Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/debug.h b/drivers/vservices/debug.h
new file mode 100644
index 0000000..b379b04
--- /dev/null
+++ b/drivers/vservices/debug.h
@@ -0,0 +1,74 @@
+/*
+ * drivers/vservices/debug.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Debugging macros and support functions for Virtual Services.
+ */
+#ifndef _VSERVICES_DEBUG_H
+#define _VSERVICES_DEBUG_H
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 38)
+#include <linux/printk.h>
+#else
+/* Older kernels lack no_printk(); provide a format-checking no-op. */
+#ifndef no_printk
+#define no_printk(format, args...) do { } while (0)
+#endif
+#endif
+
+#include <vservices/session.h>
+#include "transport.h"
+
+/* Debug category bits, tested against the session's debug_mask. */
+#define VS_DEBUG_TRANSPORT		(1 << 0)
+#define VS_DEBUG_TRANSPORT_MESSAGES	(1 << 1)
+#define VS_DEBUG_SESSION		(1 << 2)
+#define VS_DEBUG_CLIENT			(1 << 3)
+#define VS_DEBUG_CLIENT_CORE		(1 << 4)
+#define VS_DEBUG_SERVER			(1 << 5)
+#define VS_DEBUG_SERVER_CORE		(1 << 6)
+#define VS_DEBUG_PROTOCOL		(1 << 7)
+#define VS_DEBUG_ALL			0xff
+
+#ifdef CONFIG_VSERVICES_DEBUG
+
+/* Log via dev_dbg() on the session device when the category is enabled. */
+#define vs_debug(type, session, format, args...)			\
+	do {								\
+		if ((session)->debug_mask & (type))			\
+			dev_dbg(&(session)->dev, format, ##args);	\
+	} while (0)
+
+/* As vs_debug(), but logs against an arbitrary device. */
+#define vs_dev_debug(type, session, dev, format, args...)		\
+	do {								\
+		if ((session)->debug_mask & (type))			\
+			dev_dbg(dev, format, ##args);			\
+	} while (0)
+
+/* Hex-dump an mbuf's payload when message tracing is enabled. */
+static inline void vs_debug_dump_mbuf(struct vs_session_device *session,
+		struct vs_mbuf *mbuf)
+{
+	if (session->debug_mask & VS_DEBUG_TRANSPORT_MESSAGES)
+		print_hex_dump_bytes("msg:", DUMP_PREFIX_OFFSET,
+				mbuf->data, mbuf->size);
+}
+
+#else
+
+/* Dummy versions: Use no_printk to retain type/format string checking */
+#define vs_debug(type, session, format, args...) \
+	do { (void)session; no_printk(format, ##args); } while(0)
+
+#define vs_dev_debug(type, session, dev, format, args...) \
+	do { (void)session; (void)dev; no_printk(format, ##args); } while(0)
+
+static inline void vs_debug_dump_mbuf(struct vs_session_device *session,
+		struct vs_mbuf *mbuf) {}
+
+#endif /* CONFIG_VSERVICES_DEBUG */
+
+#endif /* _VSERVICES_DEBUG_H */
diff --git a/drivers/vservices/devio.c b/drivers/vservices/devio.c
new file mode 100644
index 0000000..b3ed4ab
--- /dev/null
+++ b/drivers/vservices/devio.c
@@ -0,0 +1,1059 @@
+/*
+ * devio.c - cdev I/O for service devices
+ *
+ * Copyright (c) 2016 Cog Systems Pty Ltd
+ *     Author: Philip Derrin <philip@cog.systems>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/cdev.h>
+#include <linux/pagemap.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <linux/atomic.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/uio.h>
+#include <linux/uaccess.h>
+#include <linux/poll.h>
+#include <linux/security.h>
+#include <linux/compat.h>
+
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/transport.h>
+#include <vservices/session.h>
+#include <vservices/service.h>
+#include <vservices/ioctl.h>
+#include "session.h"
+
+#define VSERVICES_DEVICE_MAX (VS_MAX_SERVICES * VS_MAX_SESSIONS)
+
+/*
+ * Per-service private state for a devio-bound service.  Lifetime is
+ * managed by 'kref'; recv_queue is protected by recv_wq.lock (see
+ * vs_devio_service_receive() and vs_devio_recv()).
+ */
+struct vs_devio_priv {
+	struct kref kref;
+	/* running: service started; reset: sticky until device is reopened */
+	bool running, reset;
+
+	/* Receive queue */
+	wait_queue_head_t recv_wq;
+	atomic_t notify_pending;
+	struct list_head recv_queue;
+};
+
+/* kref release: sanity-check quiescence, then free the private state. */
+static void
+vs_devio_priv_free(struct kref *kref)
+{
+	struct vs_devio_priv *priv = container_of(kref, struct vs_devio_priv,
+			kref);
+
+	WARN_ON(priv->running);
+	WARN_ON(!list_empty_careful(&priv->recv_queue));
+	WARN_ON(waitqueue_active(&priv->recv_wq));
+
+	kfree(priv);
+}
+
+/* Drop a reference to the devio private state. */
+static void vs_devio_priv_put(struct vs_devio_priv *priv)
+{
+	kref_put(&priv->kref, vs_devio_priv_free);
+}
+
+/*
+ * Driver probe: allocate and initialise the per-service devio state,
+ * then wake anyone blocked on the service's quota waitqueue (see
+ * vs_devio_send()).
+ */
+static int
+vs_devio_service_probe(struct vs_service_device *service)
+{
+	struct vs_devio_priv *priv;
+
+	priv = kmalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	kref_init(&priv->kref);
+	priv->running = false;
+	priv->reset = false;
+	init_waitqueue_head(&priv->recv_wq);
+	atomic_set(&priv->notify_pending, 0);
+	INIT_LIST_HEAD(&priv->recv_queue);
+
+	dev_set_drvdata(&service->dev, priv);
+
+	wake_up(&service->quota_wq);
+
+	return 0;
+}
+
+/*
+ * Driver remove: drop the reference taken by kref_init() in probe,
+ * after sanity-checking that the service is quiescent.
+ */
+static int
+vs_devio_service_remove(struct vs_service_device *service)
+{
+	struct vs_devio_priv *priv = dev_get_drvdata(&service->dev);
+
+	WARN_ON(priv->running);
+	WARN_ON(!list_empty_careful(&priv->recv_queue));
+	WARN_ON(waitqueue_active(&priv->recv_wq));
+
+	vs_devio_priv_put(priv);
+
+	return 0;
+}
+
+/*
+ * RX callback: append the incoming message to the receive queue and
+ * wake one reader.  recv_wq.lock also serialises recv_queue access.
+ */
+static int
+vs_devio_service_receive(struct vs_service_device *service,
+		struct vs_mbuf *mbuf)
+{
+	struct vs_devio_priv *priv = dev_get_drvdata(&service->dev);
+
+	WARN_ON(!priv->running);
+
+	spin_lock(&priv->recv_wq.lock);
+	list_add_tail(&mbuf->queue, &priv->recv_queue);
+	wake_up_locked(&priv->recv_wq);
+	spin_unlock(&priv->recv_wq.lock);
+
+	return 0;
+}
+
+/*
+ * Notification callback: atomically OR the new flags into the pending
+ * mask, then wake readers so vs_devio_recv() can report them.
+ */
+static void
+vs_devio_service_notify(struct vs_service_device *service, u32 flags)
+{
+	struct vs_devio_priv *priv = dev_get_drvdata(&service->dev);
+	int old, cur;
+
+	WARN_ON(!priv->running);
+
+	if (!flags)
+		return;
+
+	/* open-coded atomic_or() */
+	cur = atomic_read(&priv->notify_pending);
+	while ((old = atomic_cmpxchg(&priv->notify_pending,
+					cur, cur | flags)) != cur)
+		cur = old;
+
+	wake_up(&priv->recv_wq);
+}
+
+/*
+ * Service start callback: mark the service running and wake quota
+ * waiters, unless a reset has been latched (reset is sticky until the
+ * device is closed and reopened; see vs_devio_service_reset()).
+ */
+static void
+vs_devio_service_start(struct vs_service_device *service)
+{
+	struct vs_devio_priv *priv = dev_get_drvdata(&service->dev);
+
+	if (!priv->reset) {
+		WARN_ON(priv->running);
+		priv->running = true;
+		wake_up(&service->quota_wq);
+	}
+}
+
+/*
+ * Service reset callback: latch the sticky reset flag, discard all
+ * queued messages, and wake every waiter so they observe the reset.
+ */
+static void
+vs_devio_service_reset(struct vs_service_device *service)
+{
+	struct vs_devio_priv *priv = dev_get_drvdata(&service->dev);
+	struct vs_mbuf *mbuf, *tmp;
+
+	WARN_ON(!priv->running && !priv->reset);
+
+	/*
+	 * Mark the service as being in reset. This flag can never be cleared
+	 * on an open device; the user must acknowledge the reset by closing
+	 * and reopening the device.
+	 */
+	priv->reset = true;
+	priv->running = false;
+
+	spin_lock_irq(&priv->recv_wq.lock);
+	list_for_each_entry_safe(mbuf, tmp, &priv->recv_queue, queue)
+		vs_service_free_mbuf(service, mbuf);
+	INIT_LIST_HEAD(&priv->recv_queue);
+	spin_unlock_irq(&priv->recv_wq.lock);
+	wake_up_all(&priv->recv_wq);
+}
+
+/*
+ * This driver will be registered by the core server module, which must also
+ * set its bus and owner function pointers.
+ */
+struct vs_service_driver vs_devio_server_driver = {
+	/* No protocol, so the normal bus match will never bind this. */
+	.protocol	= NULL,
+	.is_server	= true,
+	.rx_atomic	= true,
+
+	.probe		= vs_devio_service_probe,
+	.remove		= vs_devio_service_remove,
+	.receive	= vs_devio_service_receive,
+	.notify		= vs_devio_service_notify,
+	.start		= vs_devio_service_start,
+	.reset		= vs_devio_service_reset,
+
+	/*
+	 * Set reasonable default quotas. These can be overridden by passing
+	 * nonzero values to IOCTL_VS_BIND_SERVER, which will set the
+	 * service's *_quota_set fields.
+	 */
+	.in_quota_min	= 1,
+	.in_quota_best	= 8,
+	.out_quota_min	= 1,
+	.out_quota_best	= 8,
+
+	/* Mark the notify counts as invalid; the service's will be used. */
+	.in_notify_count = (unsigned)-1,
+	.out_notify_count = (unsigned)-1,
+
+	.driver		= {
+		.name			= "vservices-server-devio",
+		.owner			= NULL, /* set by core server */
+		.bus			= NULL, /* set by core server */
+		.suppress_bind_attrs	= true, /* see vs_devio_poll */
+	},
+};
+EXPORT_SYMBOL_GPL(vs_devio_server_driver);
+
+/*
+ * Manually bind the devio server driver to @service, applying the
+ * quota and notify-bit requests from @bind and reporting the allocated
+ * quotas and maximum message size back through it.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int
+vs_devio_bind_server(struct vs_service_device *service,
+		struct vs_ioctl_bind *bind)
+{
+	int ret = -ENODEV;
+
+	/* Ensure the server module is loaded and the driver is registered. */
+	if (!try_module_get(vs_devio_server_driver.driver.owner))
+		goto fail_module_get;
+
+	device_lock(&service->dev);
+	ret = -EBUSY;
+	if (service->dev.driver != NULL)
+		goto fail_device_unbound;
+
+	/* Set up the quota and notify counts. */
+	service->in_quota_set = bind->recv_quota;
+	service->out_quota_set = bind->send_quota;
+	service->notify_send_bits = bind->send_notify_bits;
+	service->notify_recv_bits = bind->recv_notify_bits;
+
+	/* Manually probe the driver. */
+	service->dev.driver = &vs_devio_server_driver.driver;
+	ret = service->dev.bus->probe(&service->dev);
+	if (ret < 0)
+		goto fail_probe_driver;
+
+	ret = device_bind_driver(&service->dev);
+	if (ret < 0)
+		goto fail_bind_driver;
+
+	/* Pass the allocated quotas back to the user. */
+	bind->recv_quota = service->recv_quota;
+	bind->send_quota = service->send_quota;
+	bind->msg_size = vs_service_max_mbuf_size(service);
+
+	device_unlock(&service->dev);
+	module_put(vs_devio_server_driver.driver.owner);
+
+	return 0;
+
+fail_bind_driver:
+	/*
+	 * Unwind the probe, but preserve device_bind_driver()'s error code.
+	 * The original assigned remove()'s return value (normally 0) to
+	 * ret here, which made a failed bind report success.
+	 */
+	service->dev.bus->remove(&service->dev);
+fail_probe_driver:
+	service->dev.driver = NULL;
+fail_device_unbound:
+	device_unlock(&service->dev);
+	module_put(vs_devio_server_driver.driver.owner);
+fail_module_get:
+	return ret;
+}
+
+/*
+ * This driver will be registered by the core client module, which must also
+ * set its bus and owner pointers.
+ */
+struct vs_service_driver vs_devio_client_driver = {
+	/* No protocol, so the normal bus match will never bind this. */
+	.protocol	= NULL,
+	.is_server	= false,
+	.rx_atomic	= true,
+
+	/* Same callback set as the server-side devio driver above. */
+	.probe		= vs_devio_service_probe,
+	.remove		= vs_devio_service_remove,
+	.receive	= vs_devio_service_receive,
+	.notify		= vs_devio_service_notify,
+	.start		= vs_devio_service_start,
+	.reset		= vs_devio_service_reset,
+
+	.driver		= {
+		.name			= "vservices-client-devio",
+		.owner			= NULL, /* set by core client */
+		.bus			= NULL, /* set by core client */
+		.suppress_bind_attrs	= true, /* see vs_devio_poll */
+	},
+};
+EXPORT_SYMBOL_GPL(vs_devio_client_driver);
+
+/*
+ * Manually bind the devio client driver to @service and report the
+ * negotiated quotas, notify bits and maximum message size back through
+ * @bind (clients do not choose quotas; compare vs_devio_bind_server()).
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int
+vs_devio_bind_client(struct vs_service_device *service,
+		struct vs_ioctl_bind *bind)
+{
+	int ret = -ENODEV;
+
+	/* Ensure the client module is loaded and the driver is registered. */
+	if (!try_module_get(vs_devio_client_driver.driver.owner))
+		goto fail_module_get;
+
+	device_lock(&service->dev);
+	ret = -EBUSY;
+	if (service->dev.driver != NULL)
+		goto fail_device_unbound;
+
+	/* Manually probe the driver. */
+	service->dev.driver = &vs_devio_client_driver.driver;
+	ret = service->dev.bus->probe(&service->dev);
+	if (ret < 0)
+		goto fail_probe_driver;
+
+	ret = device_bind_driver(&service->dev);
+	if (ret < 0)
+		goto fail_bind_driver;
+
+	/* Pass the allocated quotas back to the user. */
+	bind->recv_quota = service->recv_quota;
+	bind->send_quota = service->send_quota;
+	bind->msg_size = vs_service_max_mbuf_size(service);
+	bind->send_notify_bits = service->notify_send_bits;
+	bind->recv_notify_bits = service->notify_recv_bits;
+
+	device_unlock(&service->dev);
+	module_put(vs_devio_client_driver.driver.owner);
+
+	return 0;
+
+fail_bind_driver:
+	/*
+	 * Unwind the probe, but preserve device_bind_driver()'s error code.
+	 * The original assigned remove()'s return value (normally 0) to
+	 * ret here, which made a failed bind report success.
+	 */
+	service->dev.bus->remove(&service->dev);
+fail_probe_driver:
+	service->dev.driver = NULL;
+fail_device_unbound:
+	device_unlock(&service->dev);
+	module_put(vs_devio_client_driver.driver.owner);
+fail_module_get:
+	return ret;
+}
+
+/*
+ * Return a referenced devio private state for @service, or NULL if the
+ * service is not currently bound to one of the two devio drivers.
+ * The caller must release the reference with vs_devio_priv_put().
+ */
+static struct vs_devio_priv *
+vs_devio_priv_get_from_service(struct vs_service_device *service)
+{
+	struct vs_devio_priv *priv = NULL;
+	struct device_driver *drv;
+
+	if (!service)
+		return NULL;
+
+	device_lock(&service->dev);
+	drv = service->dev.driver;
+
+	if ((drv == &vs_devio_client_driver.driver) ||
+			(drv == &vs_devio_server_driver.driver)) {
+		vs_service_state_lock(service);
+		priv = dev_get_drvdata(&service->dev);
+		if (priv)
+			kref_get(&priv->kref);
+		vs_service_state_unlock(service);
+	}
+
+	device_unlock(&service->dev);
+
+	return priv;
+}
+
+/*
+ * chardev open: resolve the device number to a service and stash it in
+ * file->private_data.  NOTE(review): the lookup presumably takes a
+ * service reference, since vs_devio_release() drops one with
+ * vs_put_service() -- confirm against vs_service_lookup_by_devt().
+ */
+static int
+vs_devio_open(struct inode *inode, struct file *file)
+{
+	struct vs_service_device *service;
+
+	if (imajor(inode) != vservices_cdev_major)
+		return -ENODEV;
+
+	service = vs_service_lookup_by_devt(inode->i_rdev);
+	if (!service)
+		return -ENODEV;
+
+	file->private_data = service;
+
+	return 0;
+}
+
+/*
+ * chardev release: unbind the devio driver if it is bound, then drop
+ * the service reference taken at open.
+ */
+static int
+vs_devio_release(struct inode *inode, struct file *file)
+{
+	struct vs_service_device *service = file->private_data;
+
+	if (service) {
+		struct vs_devio_priv *priv =
+			vs_devio_priv_get_from_service(service);
+
+		if (priv) {
+			device_release_driver(&service->dev);
+			vs_devio_priv_put(priv);
+		}
+
+		file->private_data = NULL;
+		vs_put_service(service);
+	}
+
+	return 0;
+}
+
+/*
+ * Copy and validate a user iovec array for a send or receive: bound
+ * the count by UIO_MAXIOV, verify each segment is accessible in the
+ * required direction, and accumulate the total length into *total
+ * (capped at MAX_RW_COUNT).  Returns a kmalloc'd array the caller must
+ * kfree(), or an ERR_PTR on failure.
+ */
+static struct iovec *
+vs_devio_check_iov(struct vs_ioctl_iovec *io, bool is_send, ssize_t *total)
+{
+	struct iovec *iov;
+	unsigned i;
+	int ret;
+
+	if (io->iovcnt > UIO_MAXIOV)
+		return ERR_PTR(-EINVAL);
+
+	iov = kmalloc(sizeof(*iov) * io->iovcnt, GFP_KERNEL);
+	if (!iov)
+		return ERR_PTR(-ENOMEM);
+
+	if (copy_from_user(iov, io->iov, sizeof(*iov) * io->iovcnt)) {
+		ret = -EFAULT;
+		goto fail;
+	}
+
+	*total = 0;
+	for (i = 0; i < io->iovcnt; i++) {
+		ssize_t iov_len = (ssize_t)iov[i].iov_len;
+
+		/* Reject totals that would exceed the kernel's rw limit. */
+		if (iov_len > MAX_RW_COUNT - *total) {
+			ret = -EINVAL;
+			goto fail;
+		}
+
+		if (!access_ok(is_send ? VERIFY_READ : VERIFY_WRITE,
+					iov[i].iov_base, iov_len)) {
+			ret = -EFAULT;
+			goto fail;
+		}
+
+		*total += iov_len;
+	}
+
+	return iov;
+
+fail:
+	kfree(iov);
+	return ERR_PTR(ret);
+}
+
+/*
+ * Send path: wait (unless @nonblocking) for send quota, allocate an
+ * mbuf of @to_send bytes, gather the user iovec into it, and send it.
+ * Returns @to_send on success or a negative errno.  The iovec must
+ * already have been validated by vs_devio_check_iov().
+ */
+static ssize_t
+vs_devio_send(struct vs_service_device *service, struct iovec *iov,
+		size_t iovcnt, ssize_t to_send, bool nonblocking)
+{
+	struct vs_mbuf *mbuf = NULL;
+	struct vs_devio_priv *priv;
+	unsigned i;
+	ssize_t offset = 0;
+	ssize_t ret;
+	DEFINE_WAIT(wait);
+
+	priv = vs_devio_priv_get_from_service(service);
+	ret = -ENODEV;
+	if (!priv)
+		goto fail_priv_get;
+
+	vs_service_state_lock(service);
+
+	/*
+	 * Waiting alloc. We must open-code this because there is no real
+	 * state structure or base state.
+	 */
+	ret = 0;
+	while (!vs_service_send_mbufs_available(service)) {
+		if (nonblocking) {
+			ret = -EAGAIN;
+			break;
+		}
+
+		if (signal_pending(current)) {
+			ret = -ERESTARTSYS;
+			break;
+		}
+
+		prepare_to_wait_exclusive(&service->quota_wq, &wait,
+				TASK_INTERRUPTIBLE);
+
+		/* Drop the state lock while sleeping; retake it to recheck. */
+		vs_service_state_unlock(service);
+		schedule();
+		vs_service_state_lock(service);
+
+		if (priv->reset) {
+			ret = -ECONNRESET;
+			break;
+		}
+
+		if (!priv->running) {
+			ret = -ENOTCONN;
+			break;
+		}
+	}
+	finish_wait(&service->quota_wq, &wait);
+
+	if (ret)
+		goto fail_alloc;
+
+	mbuf = vs_service_alloc_mbuf(service, to_send, GFP_KERNEL);
+	if (IS_ERR(mbuf)) {
+		ret = PTR_ERR(mbuf);
+		goto fail_alloc;
+	}
+
+	/* Ready to send; copy data into the mbuf. */
+	ret = -EFAULT;
+	for (i = 0; i < iovcnt; i++) {
+		if (copy_from_user(mbuf->data + offset, iov[i].iov_base,
+					iov[i].iov_len))
+			goto fail_copy;
+		offset += iov[i].iov_len;
+	}
+	mbuf->size = to_send;
+
+	/* Send the message. */
+	ret = vs_service_send(service, mbuf);
+	if (ret < 0)
+		goto fail_send;
+
+	/* Wake the next waiter, if there's more quota available. */
+	if (waitqueue_active(&service->quota_wq) &&
+			vs_service_send_mbufs_available(service) > 0)
+		wake_up(&service->quota_wq);
+
+	vs_service_state_unlock(service);
+	vs_devio_priv_put(priv);
+
+	return to_send;
+
+fail_send:
+fail_copy:
+	vs_service_free_mbuf(service, mbuf);
+	wake_up(&service->quota_wq);
+fail_alloc:
+	vs_service_state_unlock(service);
+	vs_devio_priv_put(priv);
+fail_priv_get:
+	return ret;
+}
+
+/*
+ * Receive path: wait (unless @nonblocking) for a queued message, a
+ * pending notification, or a reset.  Copies at most one message into
+ * the user iovec and returns its length.  Pending notification bits
+ * are always returned via @notify_bits and, if any are set, mask a
+ * receive error (see the comment at no_mbuf below).
+ */
+static ssize_t
+vs_devio_recv(struct vs_service_device *service, struct iovec *iov,
+		size_t iovcnt, u32 *notify_bits, ssize_t recv_space,
+		bool nonblocking)
+{
+	struct vs_mbuf *mbuf = NULL;
+	struct vs_devio_priv *priv;
+	unsigned i;
+	ssize_t offset = 0;
+	ssize_t ret;
+	DEFINE_WAIT(wait);
+
+	priv = vs_devio_priv_get_from_service(service);
+	ret = -ENODEV;
+	if (!priv)
+		goto fail_priv_get;
+
+	/* Take the recv_wq lock, which also protects recv_queue. */
+	spin_lock_irq(&priv->recv_wq.lock);
+
+	/* Wait for a message, notification, or reset. */
+	ret = wait_event_interruptible_exclusive_locked_irq(priv->recv_wq,
+			!list_empty(&priv->recv_queue) || priv->reset ||
+			atomic_read(&priv->notify_pending) || nonblocking);
+
+	if (priv->reset)
+		ret = -ECONNRESET; /* Service reset */
+	else if (!ret && list_empty(&priv->recv_queue))
+		ret = -EAGAIN; /* Nonblocking, or notification */
+
+	if (ret < 0) {
+		spin_unlock_irq(&priv->recv_wq.lock);
+		goto no_mbuf;
+	}
+
+	/* Take the first mbuf from the list, and check its size. */
+	mbuf = list_first_entry(&priv->recv_queue, struct vs_mbuf, queue);
+	if (mbuf->size > recv_space) {
+		/* Message left on the queue; skip the free below. */
+		spin_unlock_irq(&priv->recv_wq.lock);
+		ret = -EMSGSIZE;
+		goto fail_msg_size;
+	}
+	list_del_init(&mbuf->queue);
+
+	spin_unlock_irq(&priv->recv_wq.lock);
+
+	/* Copy to user. */
+	ret = -EFAULT;
+	for (i = 0; (mbuf->size > offset) && (i < iovcnt); i++) {
+		size_t len = min(mbuf->size - offset, iov[i].iov_len);
+		if (copy_to_user(iov[i].iov_base, mbuf->data + offset, len))
+			goto fail_copy;
+		offset += len;
+	}
+	ret = offset;
+
+no_mbuf:
+	/*
+	 * Read and clear the pending notification bits. If any notifications
+	 * are received, don't return an error, even if we failed to receive a
+	 * message.
+	 */
+	*notify_bits = atomic_xchg(&priv->notify_pending, 0);
+	if ((ret < 0) && *notify_bits)
+		ret = 0;
+
+fail_copy:
+	if (mbuf)
+		vs_service_free_mbuf(service, mbuf);
+fail_msg_size:
+	vs_devio_priv_put(priv);
+fail_priv_get:
+	return ret;
+}
+
+/*
+ * Check that the file was opened with the access mode(s) required for the
+ * requested operation, then consult the LSM file-permission hook.
+ *
+ * Fix: the original used bitwise '&' between the mode mask test and the
+ * negated f_mode test.  Since MAY_READ == 4 and MAY_WRITE == 2, while the
+ * negation yields 0 or 1, (flags & MAY_x) & !(...) was always 0 and the
+ * FMODE checks could never fail.  Logical '&&' enforces them as intended.
+ */
+static int
+vs_devio_check_perms(struct file *file, unsigned flags)
+{
+	if ((flags & MAY_READ) && !(file->f_mode & FMODE_READ))
+		return -EBADF;
+
+	if ((flags & MAY_WRITE) && !(file->f_mode & FMODE_WRITE))
+		return -EBADF;
+
+	return security_file_permission(file, flags);
+}
+
+/*
+ * Main ioctl dispatcher for vservices device nodes.
+ *
+ * Every command first validates the file's access mode (and LSM policy)
+ * via vs_devio_check_perms() before touching the service.
+ */
+static long
+vs_devio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	void __user *ptr = (void __user *)arg;
+	struct vs_service_device *service = file->private_data;
+	struct vs_ioctl_bind bind;
+	struct vs_ioctl_iovec io;
+	u32 flags;
+	long ret;
+	ssize_t iov_total;
+	struct iovec *iov;
+
+	if (!service)
+		return -ENODEV;
+
+	switch (cmd) {
+	case IOCTL_VS_RESET_SERVICE:
+		ret = vs_devio_check_perms(file, MAY_WRITE);
+		if (ret < 0)
+			break;
+		ret = vs_service_reset(service, service);
+		break;
+	case IOCTL_VS_GET_NAME:
+		ret = vs_devio_check_perms(file, MAY_READ);
+		if (ret < 0)
+			break;
+		if (service->name != NULL) {
+			/* Copy the NUL-terminated name, bounded by the ioctl size. */
+			size_t len = strnlen(service->name,
+					_IOC_SIZE(IOCTL_VS_GET_NAME) - 1);
+			if (copy_to_user(ptr, service->name, len + 1))
+				ret = -EFAULT;
+		} else {
+			ret = -EINVAL;
+		}
+		break;
+	case IOCTL_VS_GET_PROTOCOL:
+		ret = vs_devio_check_perms(file, MAY_READ);
+		if (ret < 0)
+			break;
+		if (service->protocol != NULL) {
+			size_t len = strnlen(service->protocol,
+					_IOC_SIZE(IOCTL_VS_GET_PROTOCOL) - 1);
+			if (copy_to_user(ptr, service->protocol, len + 1))
+				ret = -EFAULT;
+		} else {
+			ret = -EINVAL;
+		}
+		break;
+	case IOCTL_VS_BIND_CLIENT:
+		ret = vs_devio_check_perms(file, MAY_EXEC);
+		if (ret < 0)
+			break;
+		ret = vs_devio_bind_client(service, &bind);
+		if (!ret && copy_to_user(ptr, &bind, sizeof(bind)))
+			ret = -EFAULT;
+		break;
+	case IOCTL_VS_BIND_SERVER:
+		ret = vs_devio_check_perms(file, MAY_EXEC);
+		if (ret < 0)
+			break;
+		if (copy_from_user(&bind, ptr, sizeof(bind))) {
+			ret = -EFAULT;
+			break;
+		}
+		ret = vs_devio_bind_server(service, &bind);
+		if (!ret && copy_to_user(ptr, &bind, sizeof(bind)))
+			ret = -EFAULT;
+		break;
+	case IOCTL_VS_NOTIFY:
+		ret = vs_devio_check_perms(file, MAY_WRITE);
+		if (ret < 0)
+			break;
+		if (copy_from_user(&flags, ptr, sizeof(flags))) {
+			ret = -EFAULT;
+			break;
+		}
+		ret = vs_service_notify(service, flags);
+		break;
+	case IOCTL_VS_SEND:
+		ret = vs_devio_check_perms(file, MAY_WRITE);
+		if (ret < 0)
+			break;
+		if (copy_from_user(&io, ptr, sizeof(io))) {
+			ret = -EFAULT;
+			break;
+		}
+
+		iov = vs_devio_check_iov(&io, true, &iov_total);
+		if (IS_ERR(iov)) {
+			ret = PTR_ERR(iov);
+			break;
+		}
+
+		ret = vs_devio_send(service, iov, io.iovcnt, iov_total,
+				file->f_flags & O_NONBLOCK);
+		kfree(iov);
+		break;
+	case IOCTL_VS_RECV:
+		ret = vs_devio_check_perms(file, MAY_READ);
+		if (ret < 0)
+			break;
+		if (copy_from_user(&io, ptr, sizeof(io))) {
+			ret = -EFAULT;
+			break;
+		}
+
+		/*
+		 * Fix: this is a receive, so the iovec buffers will be
+		 * written to; pass is_send = false so they are validated for
+		 * write access (the original passed true, checking only read
+		 * access).
+		 */
+		iov = vs_devio_check_iov(&io, false, &iov_total);
+		if (IS_ERR(iov)) {
+			ret = PTR_ERR(iov);
+			break;
+		}
+
+		ret = vs_devio_recv(service, iov, io.iovcnt,
+			&io.notify_bits, iov_total,
+			file->f_flags & O_NONBLOCK);
+		kfree(iov);
+
+		if (ret >= 0) {
+			/* Write back only the notify_bits field. */
+			u32 __user *notify_bits_ptr = ptr + offsetof(
+					struct vs_ioctl_iovec, notify_bits);
+			if (copy_to_user(notify_bits_ptr, &io.notify_bits,
+					sizeof(io.notify_bits)))
+				ret = -EFAULT;
+		}
+		break;
+	default:
+		/* NOTE(review): kernel convention is -ENOTTY for unknown
+		 * ioctls; -ENOSYS kept to avoid changing user-visible ABI. */
+		dev_dbg(&service->dev, "Unknown ioctl %#x, arg: %lx\n", cmd,
+				arg);
+		ret = -ENOSYS;
+		break;
+	}
+
+	return ret;
+}
+
+#ifdef CONFIG_COMPAT
+
+/* 32-bit layout of struct vs_ioctl_bind; msg_size narrows to compat_size_t. */
+struct vs_compat_ioctl_bind {
+	__u32 send_quota;
+	__u32 recv_quota;
+	__u32 send_notify_bits;
+	__u32 recv_notify_bits;
+	compat_size_t msg_size;
+};
+
+/*
+ * Field-by-field copy between native and compat bind structures (either
+ * direction, chosen by the argument order).  Used for its side effects only.
+ */
+#define compat_ioctl_bind_conv(dest, src) ({ \
+	dest.send_quota = src.send_quota;		\
+	dest.recv_quota = src.recv_quota;		\
+	dest.send_notify_bits = src.send_notify_bits;	\
+	dest.recv_notify_bits = src.recv_notify_bits;	\
+	dest.msg_size = (compat_size_t)src.msg_size;	\
+})
+
+/* Compat command numbers differ from the native ones only in struct size. */
+#define COMPAT_IOCTL_VS_BIND_CLIENT _IOR('4', 3, struct vs_compat_ioctl_bind)
+#define COMPAT_IOCTL_VS_BIND_SERVER _IOWR('4', 4, struct vs_compat_ioctl_bind)
+
+/* 32-bit layout of struct vs_ioctl_iovec; iov is a compat user pointer. */
+struct vs_compat_ioctl_iovec {
+	union {
+		__u32 iovcnt; /* input */
+		__u32 notify_bits; /* output (recv only) */
+	};
+	compat_uptr_t iov;
+};
+
+#define COMPAT_IOCTL_VS_SEND \
+    _IOW('4', 6, struct vs_compat_ioctl_iovec)
+#define COMPAT_IOCTL_VS_RECV \
+    _IOWR('4', 7, struct vs_compat_ioctl_iovec)
+
+/*
+ * Copy a compat (32-bit) iovec array in from user space, convert it to a
+ * native struct iovec array, and validate it: at most UIO_MAXIOV entries,
+ * total length bounded by MAX_RW_COUNT, and each buffer accessible for
+ * read (is_send) or write (!is_send).
+ *
+ * Returns a kmalloc'd native iovec array (caller frees) and stores the
+ * total byte count in *total, or an ERR_PTR on failure.
+ */
+static struct iovec *
+vs_devio_check_compat_iov(struct vs_compat_ioctl_iovec *c_io,
+	bool is_send, ssize_t *total)
+{
+	struct iovec *iov;
+	struct compat_iovec *c_iov;
+
+	unsigned i;
+	int ret;
+
+	if (c_io->iovcnt > UIO_MAXIOV)
+		return ERR_PTR(-EINVAL);
+
+	/* Temporary compat-layout copy of the user's array. */
+	c_iov = kzalloc(sizeof(*c_iov) * c_io->iovcnt, GFP_KERNEL);
+	if (!c_iov)
+		return ERR_PTR(-ENOMEM);
+
+	iov = kzalloc(sizeof(*iov) * c_io->iovcnt, GFP_KERNEL);
+	if (!iov) {
+		kfree(c_iov);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	if (copy_from_user(c_iov, (struct compat_iovec __user *)
+		compat_ptr(c_io->iov), sizeof(*c_iov) * c_io->iovcnt)) {
+		ret = -EFAULT;
+		goto fail;
+	}
+
+	*total = 0;
+	for (i = 0; i < c_io->iovcnt; i++) {
+		ssize_t iov_len;
+		/* Widen the 32-bit pointer/length to native types. */
+		iov[i].iov_base = compat_ptr (c_iov[i].iov_base);
+		iov[i].iov_len = (compat_size_t) c_iov[i].iov_len;
+
+		iov_len = (ssize_t)iov[i].iov_len;
+
+		/* Reject a total that would exceed the kernel I/O limit. */
+		if (iov_len > MAX_RW_COUNT - *total) {
+			ret = -EINVAL;
+			goto fail;
+		}
+
+		if (!access_ok(is_send ? VERIFY_READ : VERIFY_WRITE,
+					iov[i].iov_base, iov_len)) {
+			ret = -EFAULT;
+			goto fail;
+		}
+
+		*total += iov_len;
+	}
+
+	kfree (c_iov);
+	return iov;
+
+fail:
+	kfree(c_iov);
+	kfree(iov);
+	return ERR_PTR(ret);
+}
+
+/*
+ * 32-bit compat ioctl dispatcher.  Size-independent commands are forwarded
+ * to the native handler; the iovec- and bind-based commands are translated
+ * through the compat structures above.
+ */
+static long
+vs_devio_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	void __user *ptr = (void __user *)arg;
+	struct vs_service_device *service = file->private_data;
+	struct vs_ioctl_bind bind;
+	struct vs_compat_ioctl_bind compat_bind;
+	struct vs_compat_ioctl_iovec compat_io;
+	long ret;
+	ssize_t iov_total;
+	struct iovec *iov;
+
+	if (!service)
+		return -ENODEV;
+
+	switch (cmd) {
+	case IOCTL_VS_RESET_SERVICE:
+	case IOCTL_VS_GET_NAME:
+	case IOCTL_VS_GET_PROTOCOL:
+		return vs_devio_ioctl (file, cmd, arg);
+	case COMPAT_IOCTL_VS_SEND:
+		ret = vs_devio_check_perms(file, MAY_WRITE);
+		if (ret < 0)
+			break;
+		if (copy_from_user(&compat_io, ptr, sizeof(compat_io))) {
+			ret = -EFAULT;
+			break;
+		}
+
+		iov = vs_devio_check_compat_iov(&compat_io, true, &iov_total);
+		if (IS_ERR(iov)) {
+			ret = PTR_ERR(iov);
+			break;
+		}
+
+		ret = vs_devio_send(service, iov, compat_io.iovcnt, iov_total,
+				file->f_flags & O_NONBLOCK);
+		kfree(iov);
+
+		break;
+	case COMPAT_IOCTL_VS_RECV:
+		ret = vs_devio_check_perms(file, MAY_READ);
+		if (ret < 0)
+			break;
+		if (copy_from_user(&compat_io, ptr, sizeof(compat_io))) {
+			ret = -EFAULT;
+			break;
+		}
+
+		/*
+		 * Fix: receive buffers are written to, so validate them for
+		 * write access (is_send = false); the original passed true.
+		 */
+		iov = vs_devio_check_compat_iov(&compat_io, false, &iov_total);
+		if (IS_ERR(iov)) {
+			ret = PTR_ERR(iov);
+			break;
+		}
+
+		ret = vs_devio_recv(service, iov, compat_io.iovcnt,
+			&compat_io.notify_bits, iov_total,
+			file->f_flags & O_NONBLOCK);
+		kfree(iov);
+
+		if (ret >= 0) {
+			u32 __user *notify_bits_ptr = ptr + offsetof(
+					struct vs_compat_ioctl_iovec, notify_bits);
+			if (copy_to_user(notify_bits_ptr, &compat_io.notify_bits,
+					sizeof(compat_io.notify_bits)))
+				ret = -EFAULT;
+		}
+		break;
+	case COMPAT_IOCTL_VS_BIND_CLIENT:
+		ret = vs_devio_check_perms(file, MAY_EXEC);
+		if (ret < 0)
+			break;
+		ret = vs_devio_bind_client(service, &bind);
+		/*
+		 * Fix: only convert and copy out on success; the original
+		 * converted 'bind' unconditionally, reading an uninitialized
+		 * stack structure when the bind failed.
+		 */
+		if (!ret) {
+			compat_ioctl_bind_conv(compat_bind, bind);
+			if (copy_to_user(ptr, &compat_bind,
+						sizeof(compat_bind)))
+				ret = -EFAULT;
+		}
+		break;
+	case COMPAT_IOCTL_VS_BIND_SERVER:
+		ret = vs_devio_check_perms(file, MAY_EXEC);
+		if (ret < 0)
+			break;
+		if (copy_from_user(&compat_bind, ptr, sizeof(compat_bind))) {
+			ret = -EFAULT;
+			break;
+		}
+		compat_ioctl_bind_conv(bind, compat_bind);
+		ret = vs_devio_bind_server(service, &bind);
+		compat_ioctl_bind_conv(compat_bind, bind);
+		if (!ret && copy_to_user(ptr, &compat_bind,
+					sizeof(compat_bind)))
+			ret = -EFAULT;
+		break;
+	default:
+		dev_dbg(&service->dev, "Unknown ioctl %#x, arg: %lx\n", cmd,
+				arg);
+		ret = -ENOSYS;
+		break;
+	}
+
+	return ret;
+}
+
+#endif /* CONFIG_COMPAT */
+
+/*
+ * poll() handler: readable when a message or notification is pending,
+ * writable when send buffers are available, error/hangup on service reset
+ * or when no devio driver is bound.
+ */
+static unsigned int
+vs_devio_poll(struct file *file, struct poll_table_struct *wait)
+{
+	struct vs_service_device *service = file->private_data;
+	struct vs_devio_priv *priv = vs_devio_priv_get_from_service(service);
+	unsigned int flags = 0;
+
+	/* quota_wq wakes us when send buffer space becomes available. */
+	poll_wait(file, &service->quota_wq, wait);
+
+	if (priv) {
+		/*
+		 * Note: there is no way for us to ensure that all poll
+		 * waiters on a given workqueue have gone away, other than to
+		 * actually close the file. So, this poll_wait() is only safe
+		 * if we never release our claim on the service before the
+		 * file is closed.
+		 *
+		 * We try to guarantee this by only unbinding the devio driver
+		 * on close, and setting suppress_bind_attrs in the driver so
+		 * root can't unbind us with sysfs.
+		 */
+		poll_wait(file, &priv->recv_wq, wait);
+
+		if (priv->reset) {
+			/* Service reset; raise poll error. */
+			flags |= POLLERR | POLLHUP;
+		} else if (priv->running) {
+			if (!list_empty_careful(&priv->recv_queue))
+				flags |= POLLRDNORM | POLLIN;
+			if (atomic_read(&priv->notify_pending))
+				flags |= POLLRDNORM | POLLIN;
+			if (vs_service_send_mbufs_available(service) > 0)
+				flags |= POLLWRNORM | POLLOUT;
+		}
+
+		vs_devio_priv_put(priv);
+	} else {
+		/* No driver attached. Return error flags. */
+		flags |= POLLERR | POLLHUP;
+	}
+
+	return flags;
+}
+
+/* File operations for the per-service character device nodes. */
+static const struct file_operations vs_fops = {
+	.owner		= THIS_MODULE,
+	.open		= vs_devio_open,
+	.release	= vs_devio_release,
+	.unlocked_ioctl	= vs_devio_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= vs_devio_compat_ioctl,
+#endif
+	.poll		= vs_devio_poll,
+};
+
+/* Major number allocated in vs_devio_init(); non-static, presumably so
+ * other parts of the framework can build dev_t values — confirm users. */
+int vservices_cdev_major;
+static struct cdev vs_cdev;
+
+/*
+ * Allocate a dynamic char-device region for vservices nodes and register
+ * the cdev backed by vs_fops.  Returns 0 on success or a negative errno.
+ */
+int __init
+vs_devio_init(void)
+{
+	dev_t base;
+	int err;
+
+	err = alloc_chrdev_region(&base, 0, VSERVICES_DEVICE_MAX,
+			"vs_service");
+	if (err < 0)
+		return err;
+
+	vservices_cdev_major = MAJOR(base);
+	cdev_init(&vs_cdev, &vs_fops);
+
+	err = cdev_add(&vs_cdev, base, VSERVICES_DEVICE_MAX);
+	if (err < 0) {
+		/* Undo the region allocation on failure. */
+		unregister_chrdev_region(base, VSERVICES_DEVICE_MAX);
+		return err;
+	}
+
+	return 0;
+}
+
+/* Tear down the cdev and release the char-device region from init. */
+void __exit
+vs_devio_exit(void)
+{
+	dev_t base = MKDEV(vservices_cdev_major, 0);
+
+	cdev_del(&vs_cdev);
+	unregister_chrdev_region(base, VSERVICES_DEVICE_MAX);
+}
diff --git a/drivers/vservices/protocol/Kconfig b/drivers/vservices/protocol/Kconfig
new file mode 100644
index 0000000..a4c622b
--- /dev/null
+++ b/drivers/vservices/protocol/Kconfig
@@ -0,0 +1,11 @@
+#
+# vServices protocol drivers configuration
+#
+
+# Protocol drivers are only meaningful when at least one endpoint role
+# (server or client) support is enabled.
+if VSERVICES_SERVER || VSERVICES_CLIENT
+
+menu "Protocol drivers"
+
+endmenu
+
+endif # VSERVICES_SERVER || VSERVICES_CLIENT
diff --git a/drivers/vservices/protocol/Makefile b/drivers/vservices/protocol/Makefile
new file mode 100644
index 0000000..a2162c8
--- /dev/null
+++ b/drivers/vservices/protocol/Makefile
@@ -0,0 +1,3 @@
+# This is a autogenerated Makefile for vservice-linux-stacks
+
+obj-$(CONFIG_VSERVICES_SUPPORT) += core/
diff --git a/drivers/vservices/protocol/core/Makefile b/drivers/vservices/protocol/core/Makefile
new file mode 100644
index 0000000..6bef7f5
--- /dev/null
+++ b/drivers/vservices/protocol/core/Makefile
@@ -0,0 +1,7 @@
+ccflags-y += -Werror
+
+obj-$(CONFIG_VSERVICES_SERVER) += vservices_protocol_core_server.o
+vservices_protocol_core_server-objs = server.o
+
+obj-$(CONFIG_VSERVICES_CLIENT) += vservices_protocol_core_client.o
+vservices_protocol_core_client-objs = client.o
diff --git a/drivers/vservices/protocol/core/client.c b/drivers/vservices/protocol/core/client.c
new file mode 100644
index 0000000..2dd2136
--- /dev/null
+++ b/drivers/vservices/protocol/core/client.c
@@ -0,0 +1,1069 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+ /*
+  * This is the generated code for the core client protocol handling.
+  */
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
+#include <linux/export.h>
+#endif
+
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/protocol/core/types.h>
+#include <vservices/protocol/core/common.h>
+#include <vservices/protocol/core/client.h>
+#include <vservices/service.h>
+
+#include "../../transport.h"
+
+/* Accessor shorthands used throughout the generated protocol code. */
+#define VS_MBUF_SIZE(mbuf) mbuf->size
+#define VS_MBUF_DATA(mbuf) mbuf->data
+#define VS_STATE_SERVICE_PTR(state) state->service
+
+/*** Linux driver model integration ***/
+/* Wraps a vs_service_driver so the client ops can be recovered from it. */
+struct vs_core_client_driver {
+	struct vs_client_core *client;
+	struct list_head list;
+	struct vs_service_driver vsdrv;
+};
+
+/* Recover the wrapper from an embedded vs_service_driver pointer. */
+#define to_client_driver(d) \
+        container_of(d, struct vs_core_client_driver, vsdrv)
+
+/* Service start callback: reset the protocol state, then invoke the
+ * client's start hook under the service state lock. */
+static void core_handle_start(struct vs_service_device *service)
+{
+
+	struct vs_client_core_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_core *client __maybe_unused =
+	    to_client_driver(vsdrv)->client;
+
+	vs_service_state_lock(service);
+	state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+	if (client->start)
+		client->start(state);
+	vs_service_state_unlock(service);
+}
+
+/* Service reset callback: reset the protocol state, then invoke the
+ * client's reset hook under the service state lock. */
+static void core_handle_reset(struct vs_service_device *service)
+{
+
+	struct vs_client_core_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_core *client __maybe_unused =
+	    to_client_driver(vsdrv)->client;
+
+	vs_service_state_lock(service);
+	state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+	if (client->reset)
+		client->reset(state);
+	vs_service_state_unlock(service);
+}
+
+/* As core_handle_start(), but takes the bottom-half form of the state
+ * lock; selected at registration time when the client is tx_atomic. */
+static void core_handle_start_bh(struct vs_service_device *service)
+{
+
+	struct vs_client_core_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_core *client __maybe_unused =
+	    to_client_driver(vsdrv)->client;
+
+	vs_service_state_lock_bh(service);
+	state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+	if (client->start)
+		client->start(state);
+	vs_service_state_unlock_bh(service);
+}
+
+/* As core_handle_reset(), but takes the bottom-half form of the state
+ * lock; selected at registration time when the client is tx_atomic. */
+static void core_handle_reset_bh(struct vs_service_device *service)
+{
+
+	struct vs_client_core_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_core *client __maybe_unused =
+	    to_client_driver(vsdrv)->client;
+
+	vs_service_state_lock_bh(service);
+	state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+	if (client->reset)
+		client->reset(state);
+	vs_service_state_unlock_bh(service);
+}
+
+/*
+ * Forward declarations for the vs_service_driver callbacks wired up in
+ * __vservice_core_client_register().  (start/reset are re-declared here
+ * even though they are defined above; this is generated code.)
+ */
+static int core_client_probe(struct vs_service_device *service);
+static int core_client_remove(struct vs_service_device *service);
+static int core_handle_message(struct vs_service_device *service,
+			       struct vs_mbuf *_mbuf);
+static void core_handle_notify(struct vs_service_device *service,
+			       uint32_t flags);
+static void core_handle_start(struct vs_service_device *service);
+static void core_handle_start_bh(struct vs_service_device *service);
+static void core_handle_reset(struct vs_service_device *service);
+static void core_handle_reset_bh(struct vs_service_device *service);
+static int core_handle_tx_ready(struct vs_service_device *service);
+
+/*
+ * Allocate and register a driver-model driver wrapping the given core
+ * protocol client.  Returns 0 on success or a negative errno; on failure
+ * client->driver is left NULL.
+ */
+int __vservice_core_client_register(struct vs_client_core *client,
+				    const char *name, struct module *owner)
+{
+	struct vs_core_client_driver *cdrv;
+	int err;
+
+	/* An atomic TX path requires an atomic RX path as well. */
+	if (client->tx_atomic && !client->rx_atomic)
+		return -EINVAL;
+
+	cdrv = kzalloc(sizeof(*cdrv), GFP_KERNEL);
+	if (!cdrv)
+		return -ENOMEM;
+
+	client->driver = &cdrv->vsdrv;
+	cdrv->client = client;
+
+	/* Wire the generated callbacks into the service driver. */
+	cdrv->vsdrv.protocol = VSERVICE_CORE_PROTOCOL_NAME;
+	cdrv->vsdrv.is_server = false;
+	cdrv->vsdrv.rx_atomic = client->rx_atomic;
+	cdrv->vsdrv.tx_atomic = client->tx_atomic;
+	cdrv->vsdrv.probe = core_client_probe;
+	cdrv->vsdrv.remove = core_client_remove;
+	cdrv->vsdrv.receive = core_handle_message;
+	cdrv->vsdrv.notify = core_handle_notify;
+	/* Atomic-TX clients need the bottom-half lock variants. */
+	cdrv->vsdrv.start = client->tx_atomic ?
+	    core_handle_start_bh : core_handle_start;
+	cdrv->vsdrv.reset = client->tx_atomic ?
+	    core_handle_reset_bh : core_handle_reset;
+	cdrv->vsdrv.tx_ready = core_handle_tx_ready;
+	cdrv->vsdrv.out_notify_count = 0;
+	cdrv->vsdrv.in_notify_count = 0;
+	cdrv->vsdrv.driver.name = name;
+	cdrv->vsdrv.driver.owner = owner;
+	cdrv->vsdrv.driver.bus = &vs_client_bus_type;
+
+	err = driver_register(&cdrv->vsdrv.driver);
+	if (err) {
+		client->driver = NULL;
+		kfree(cdrv);
+		return err;
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(__vservice_core_client_register);
+
+/* Unregister and free the driver created by the register call above.
+ * A client that was never registered is silently accepted. */
+int vservice_core_client_unregister(struct vs_client_core *client)
+{
+	struct vs_core_client_driver *cdrv;
+
+	if (!client->driver)
+		return 0;
+
+	cdrv = to_client_driver(client->driver);
+	driver_unregister(&cdrv->vsdrv.driver);
+	client->driver = NULL;
+	kfree(cdrv);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vservice_core_client_unregister);
+
+/*
+ * Driver-model probe: ask the client to allocate its per-service state,
+ * take a reference on the service, and attach the state to the device.
+ */
+static int core_client_probe(struct vs_service_device *service)
+{
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_core *client = to_client_driver(vsdrv)->client;
+	struct vs_client_core_state *state;
+
+	state = client->alloc(service);
+	if (IS_ERR(state))
+		return PTR_ERR(state);
+	if (!state)
+		return -ENOMEM;
+
+	state->service = vs_get_service(service);
+	state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+	dev_set_drvdata(&service->dev, state);
+
+	return 0;
+}
+
+/*
+ * Driver-model remove: mark the state released, detach it from the device,
+ * hand it back to the client's release hook, then drop the service
+ * reference taken in probe.  drvdata is cleared before release() —
+ * presumably so concurrent lookups cannot see a dying state; confirm.
+ */
+static int core_client_remove(struct vs_service_device *service)
+{
+	struct vs_client_core_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_core *client = to_client_driver(vsdrv)->client;
+
+	state->released = true;
+	dev_set_drvdata(&service->dev, NULL);
+	client->release(state);
+
+	vs_put_service(service);
+
+	return 0;
+}
+
+/* Forward the transport's transmit-ready event to the client, if the
+ * client installed a tx_ready hook. */
+static int core_handle_tx_ready(struct vs_service_device *service)
+{
+	struct vs_client_core_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_core *client = to_client_driver(vsdrv)->client;
+
+	if (client->tx_ready)
+		client->tx_ready(state);
+
+	return 0;
+}
+
+/*
+ * Map the service-name and protocol-name payload of a SERVICE_CREATED
+ * message into the caller's vs_string descriptors.  The strings point
+ * directly into the mbuf, so the mbuf must remain alive until it is
+ * released via vs_client_core_core_free_service_created().
+ *
+ * Returns 0 on success, -EINVAL for a wrong message id, or -EBADMSG if
+ * the payload size is inconsistent with the declared field layout.
+ */
+int vs_client_core_core_getbufs_service_created(struct vs_client_core_state
+						*_state,
+						struct vs_string *service_name,
+						struct vs_string *protocol_name,
+						struct vs_mbuf *_mbuf)
+{
+	const vs_message_id_t _msg_id = VSERVICE_CORE_CORE_MSG_SERVICE_CREATED;
+	const size_t _max_size =
+	    sizeof(vs_message_id_t) + VSERVICE_CORE_SERVICE_NAME_SIZE +
+	    VSERVICE_CORE_PROTOCOL_NAME_SIZE + 4UL;
+	const size_t _min_size = _max_size - VSERVICE_CORE_PROTOCOL_NAME_SIZE;
+	size_t _exact_size;
+
+	if (*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) != _msg_id)
+		return -EINVAL;
+	if ((VS_MBUF_SIZE(_mbuf) > _max_size)
+	    || (VS_MBUF_SIZE(_mbuf) < _min_size))
+		return -EBADMSG;
+
+	/* Fixed-size service name follows the message id and 32-bit id field. */
+	service_name->ptr =
+	    (char *)(VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+	service_name->max_size = VSERVICE_CORE_SERVICE_NAME_SIZE;
+
+	/* Variable-size protocol name occupies the remainder of the mbuf. */
+	protocol_name->ptr =
+	    (char *)(VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+		     VSERVICE_CORE_SERVICE_NAME_SIZE + 4UL);
+	protocol_name->max_size =
+	    VS_MBUF_SIZE(_mbuf) - (sizeof(vs_message_id_t) +
+				   VSERVICE_CORE_SERVICE_NAME_SIZE + 4UL);
+
+	/* Now check the size received is the exact size expected */
+	_exact_size =
+	    _max_size - (VSERVICE_CORE_PROTOCOL_NAME_SIZE -
+			 protocol_name->max_size);
+	if (VS_MBUF_SIZE(_mbuf) != _exact_size)
+		return -EBADMSG;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_getbufs_service_created);
+/* Release the mbuf obtained via getbufs_service_created(); the vs_string
+ * pointers into it become invalid after this call. */
+int vs_client_core_core_free_service_created(struct vs_client_core_state
+					     *_state,
+					     struct vs_string *service_name,
+					     struct vs_string *protocol_name,
+					     struct vs_mbuf *_mbuf)
+{
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_free_service_created);
+/*
+ * Send a core-protocol connect request.  Only valid in the DISCONNECTED
+ * state; transitions to DISCONNECTED__CONNECT (notifying the client's
+ * state_change hook) before the message is queued on the transport.
+ *
+ * Returns 0 on success, -EPROTO in a wrong state, or a transport error.
+ */
+int
+vs_client_core_core_req_connect(struct vs_client_core_state *_state,
+				gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_client_core *_client =
+	    to_client_driver(vsdrv)->client;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_DISCONNECTED:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_CORE_CORE_REQ_CONNECT;
+
+	/* State transition happens before the send so the reply handler
+	 * always observes the pending-connect state. */
+	_state->state.core.statenum = VSERVICE_CORE_STATE_DISCONNECTED__CONNECT;
+
+	if (_client->core.state_change)
+		_client->core.state_change(_state,
+					   VSERVICE_CORE_STATE_DISCONNECTED,
+					   VSERVICE_CORE_STATE_DISCONNECTED__CONNECT);
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_req_connect);
+/*
+ * Send a core-protocol disconnect request.  Only valid in the CONNECTED
+ * state; transitions to CONNECTED__DISCONNECT (notifying the client's
+ * state_change hook) before the message is queued on the transport.
+ *
+ * Returns 0 on success, -EPROTO in a wrong state, or a transport error.
+ */
+int
+vs_client_core_core_req_disconnect(struct vs_client_core_state *_state,
+				   gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_client_core *_client =
+	    to_client_driver(vsdrv)->client;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_CORE_CORE_REQ_DISCONNECT;
+
+	/* Transition before the send so the reply handler always observes
+	 * the pending-disconnect state. */
+	_state->state.core.statenum = VSERVICE_CORE_STATE_CONNECTED__DISCONNECT;
+
+	if (_client->core.state_change)
+		_client->core.state_change(_state,
+					   VSERVICE_CORE_STATE_CONNECTED,
+					   VSERVICE_CORE_STATE_CONNECTED__DISCONNECT);
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_req_disconnect);
+/*
+ * Handle the server's ACK of a pending connect request: move to CONNECTED,
+ * free the message, and invoke the client's ack_connect hook if present.
+ */
+static int
+core_core_handle_ack_connect(const struct vs_client_core *_client,
+			     struct vs_client_core_state *_state,
+			     struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_DISCONNECTED__CONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+	_state->state.core.statenum = VSERVICE_CORE_STATE_CONNECTED;
+
+	if (_client->core.state_change)
+		_client->core.state_change(_state,
+					   VSERVICE_CORE_STATE_DISCONNECTED__CONNECT,
+					   VSERVICE_CORE_STATE_CONNECTED);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_client->core.ack_connect)
+		return _client->core.ack_connect(_state);
+	return 0;
+}
+
+/*
+ * Handle the server's NACK of a pending connect request: return to
+ * DISCONNECTED, free the message, and invoke the client's nack_connect
+ * hook if present.
+ */
+static int
+core_core_handle_nack_connect(const struct vs_client_core *_client,
+			      struct vs_client_core_state *_state,
+			      struct vs_mbuf *_mbuf)
+{
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_DISCONNECTED__CONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+	_state->state.core.statenum = VSERVICE_CORE_STATE_DISCONNECTED;
+
+	if (_client->core.state_change)
+		_client->core.state_change(_state,
+					   VSERVICE_CORE_STATE_DISCONNECTED__CONNECT,
+					   VSERVICE_CORE_STATE_DISCONNECTED);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_client->core.nack_connect)
+		return _client->core.nack_connect(_state);
+	return 0;
+}
+
+/*
+ * Fix: a stray EXPORT_SYMBOL(core_core_handle_ack_connect) was removed
+ * here; exporting a static, file-local handler is invalid (rejected by
+ * modern kernel builds) and nothing external can reference it.
+ */
+/*
+ * Handle the server's ACK of a pending disconnect request: move to
+ * DISCONNECTED, free the message, and invoke the client's ack_disconnect
+ * hook if present.
+ */
+static int
+core_core_handle_ack_disconnect(const struct vs_client_core *_client,
+				struct vs_client_core_state *_state,
+				struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+	_state->state.core.statenum = VSERVICE_CORE_STATE_DISCONNECTED;
+
+	if (_client->core.state_change)
+		_client->core.state_change(_state,
+					   VSERVICE_CORE_STATE_CONNECTED__DISCONNECT,
+					   VSERVICE_CORE_STATE_DISCONNECTED);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_client->core.ack_disconnect)
+		return _client->core.ack_disconnect(_state);
+	return 0;
+}
+
+/*
+ * Handle the server's NACK of a pending disconnect request: remain
+ * CONNECTED, free the message, and invoke the client's nack_disconnect
+ * hook if present.
+ */
+static int
+core_core_handle_nack_disconnect(const struct vs_client_core *_client,
+				 struct vs_client_core_state *_state,
+				 struct vs_mbuf *_mbuf)
+{
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+	_state->state.core.statenum = VSERVICE_CORE_STATE_CONNECTED;
+
+	if (_client->core.state_change)
+		_client->core.state_change(_state,
+					   VSERVICE_CORE_STATE_CONNECTED__DISCONNECT,
+					   VSERVICE_CORE_STATE_CONNECTED);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_client->core.nack_disconnect)
+		return _client->core.nack_disconnect(_state);
+	return 0;
+}
+
+/*
+ * Fix: a stray EXPORT_SYMBOL(core_core_handle_ack_disconnect) was removed
+ * here; exporting a static, file-local handler is invalid and nothing
+ * external can reference it.
+ */
+/*
+ * Handle the server's STARTUP message: extract the in/out quotas, move
+ * from OFFLINE to DISCONNECTED, and invoke the client's msg_startup hook
+ * if present.
+ *
+ * Fixes: removed an unreachable duplicated "return 0;" and a stray
+ * EXPORT_SYMBOL of this static function (invalid on static symbols).
+ */
+static int
+vs_client_core_core_handle_startup(const struct vs_client_core *_client,
+				   struct vs_client_core_state *_state,
+				   struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 8UL;
+	uint32_t core_in_quota;
+	uint32_t core_out_quota;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_OFFLINE:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	_state->state.core.statenum = VSERVICE_CORE_STATE_DISCONNECTED;
+
+	if (_client->core.state_change)
+		_client->core.state_change(_state, VSERVICE_CORE_STATE_OFFLINE,
+					   VSERVICE_CORE_STATE_DISCONNECTED);
+	/* Two consecutive 32-bit quota fields follow the message id. */
+	core_in_quota =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	core_out_quota =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_client->core.msg_startup)
+		return _client->core.msg_startup(_state, core_in_quota,
+						 core_out_quota);
+	return 0;
+}
+/*
+ * Handle the server's SHUTDOWN message: from any non-offline state, fall
+ * back to OFFLINE (the pending-transition states are left for the matching
+ * ack/nack to resolve), free the message, and invoke the client's
+ * msg_shutdown hook if present.
+ *
+ * Fixes: removed an unreachable duplicated "return 0;" and a stray
+ * EXPORT_SYMBOL of this static function (invalid on static symbols).
+ */
+static int
+vs_client_core_core_handle_shutdown(const struct vs_client_core *_client,
+				    struct vs_client_core_state *_state,
+				    struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_DISCONNECTED:
+	case VSERVICE_CORE_STATE_DISCONNECTED__CONNECT:
+	case VSERVICE_CORE_STATE_CONNECTED:
+	case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_DISCONNECTED:
+		_state->state.core.statenum = VSERVICE_CORE_STATE_OFFLINE;
+
+		if (_client->core.state_change)
+			_client->core.state_change(_state,
+						   VSERVICE_CORE_STATE_DISCONNECTED,
+						   VSERVICE_CORE_STATE_OFFLINE);
+		break;
+	case VSERVICE_CORE_STATE_CONNECTED:
+		_state->state.core.statenum = VSERVICE_CORE_STATE_OFFLINE;
+
+		if (_client->core.state_change)
+			_client->core.state_change(_state,
+						   VSERVICE_CORE_STATE_CONNECTED,
+						   VSERVICE_CORE_STATE_OFFLINE);
+		break;
+
+	default:
+		break;
+	}
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_client->core.msg_shutdown)
+		return _client->core.msg_shutdown(_state);
+	return 0;
+}
+/*
+ * Handle a SERVICE_CREATED message: validate the payload layout, map the
+ * name strings into the mbuf, and pass everything (including ownership of
+ * the mbuf) to the client's msg_service_created hook.
+ *
+ * NOTE(review): if the client has no msg_service_created handler the mbuf
+ * is not freed here — confirm the caller releases it in that case.
+ *
+ * Fixes: removed an unreachable duplicated "return 0;" and a stray
+ * EXPORT_SYMBOL of this static function (invalid on static symbols).
+ */
+static int
+vs_client_core_core_handle_service_created(const struct vs_client_core *_client,
+					   struct vs_client_core_state *_state,
+					   struct vs_mbuf *_mbuf)
+{
+	const size_t _max_size =
+	    sizeof(vs_message_id_t) + VSERVICE_CORE_SERVICE_NAME_SIZE +
+	    VSERVICE_CORE_PROTOCOL_NAME_SIZE + 4UL;
+	uint32_t service_id;
+	struct vs_string service_name;
+	struct vs_string protocol_name;
+	const size_t _min_size = _max_size - VSERVICE_CORE_PROTOCOL_NAME_SIZE;
+	size_t _exact_size;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED:
+	case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	/* The first check is to ensure the message isn't complete garbage */
+	if ((VS_MBUF_SIZE(_mbuf) > _max_size)
+	    || (VS_MBUF_SIZE(_mbuf) < _min_size))
+		return -EBADMSG;
+	service_id =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	service_name.ptr =
+	    (char *)(VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+	service_name.max_size = VSERVICE_CORE_SERVICE_NAME_SIZE;
+
+	protocol_name.ptr =
+	    (char *)(VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+		     VSERVICE_CORE_SERVICE_NAME_SIZE + 4UL);
+	protocol_name.max_size =
+	    VS_MBUF_SIZE(_mbuf) - (sizeof(vs_message_id_t) +
+				   VSERVICE_CORE_SERVICE_NAME_SIZE + 4UL);
+
+	/* Now check the size received is the exact size expected */
+	_exact_size =
+	    _max_size - (VSERVICE_CORE_PROTOCOL_NAME_SIZE -
+			 protocol_name.max_size);
+	if (VS_MBUF_SIZE(_mbuf) != _exact_size)
+		return -EBADMSG;
+	if (_client->core.msg_service_created)
+		return _client->core.msg_service_created(_state, service_id,
+							 service_name,
+							 protocol_name, _mbuf);
+	return 0;
+}
+/*
+ * Handle a received core "service_removed" message.
+ *
+ * Only legal in the CONNECTED or CONNECTED__DISCONNECT protocol states
+ * (-EPROTO otherwise).  Unpacks the removed service's id, frees the mbuf,
+ * and invokes the client's msg_service_removed handler if one is set.
+ * Returns -EBADMSG if the message is shorter than expected.
+ */
+static int
+vs_client_core_core_handle_service_removed(const struct vs_client_core *_client,
+					   struct vs_client_core_state *_state,
+					   struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 4UL;
+	uint32_t service_id;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED:
+	case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	service_id =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_client->core.msg_service_removed)
+		return _client->core.msg_service_removed(_state, service_id);
+	return 0;
+}
+
+/* NOTE(review): EXPORT_SYMBOL on a static function — see note elsewhere. */
+EXPORT_SYMBOL(vs_client_core_core_handle_service_removed);
+/*
+ * Handle a received core "server_ready" message.
+ *
+ * Only legal in the CONNECTED or CONNECTED__DISCONNECT protocol states
+ * (-EPROTO otherwise).  Unpacks the seven u32 fields describing the new
+ * service's quotas and notification bit ranges, frees the mbuf, then
+ * forwards the values to the client's msg_server_ready handler if set.
+ * Returns -EBADMSG if the message is shorter than expected.
+ */
+static int
+vs_client_core_core_handle_server_ready(const struct vs_client_core *_client,
+					struct vs_client_core_state *_state,
+					struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 28UL;
+	uint32_t service_id;
+	uint32_t in_quota;
+	uint32_t out_quota;
+	uint32_t in_bit_offset;
+	uint32_t in_num_bits;
+	uint32_t out_bit_offset;
+	uint32_t out_num_bits;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED:
+	case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	/* Fixed little-endian-style layout: seven consecutive u32 fields */
+	service_id =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	in_quota =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+	out_quota =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 8UL);
+	in_bit_offset =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+			   12UL);
+	in_num_bits =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+			   16UL);
+	out_bit_offset =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+			   20UL);
+	out_num_bits =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+			   24UL);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_client->core.msg_server_ready)
+		return _client->core.msg_server_ready(_state, service_id,
+						      in_quota, out_quota,
+						      in_bit_offset,
+						      in_num_bits,
+						      out_bit_offset,
+						      out_num_bits);
+	return 0;
+}
+
+/* NOTE(review): EXPORT_SYMBOL on a static function — see note elsewhere. */
+EXPORT_SYMBOL(vs_client_core_core_handle_server_ready);
+/*
+ * Handle a received core "service_reset" message.
+ *
+ * Only legal in the CONNECTED protocol state (-EPROTO otherwise).  Unpacks
+ * the id of the service being reset, frees the mbuf, and invokes the
+ * client's msg_service_reset handler if one is set.  Returns -EBADMSG if
+ * the message is shorter than expected.
+ */
+static int
+vs_client_core_core_handle_service_reset(const struct vs_client_core *_client,
+					 struct vs_client_core_state *_state,
+					 struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 4UL;
+	uint32_t service_id;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	service_id =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_client->core.msg_service_reset)
+		return _client->core.msg_service_reset(_state, service_id);
+	return 0;
+}
+
+/* NOTE(review): EXPORT_SYMBOL on a static function — see note elsewhere. */
+EXPORT_SYMBOL(vs_client_core_core_handle_service_reset);
+/*
+ * Send a core "service_reset" message to the server for @service_id.
+ *
+ * Only legal in the CONNECTED protocol state (-EPROTO otherwise).
+ * Allocates an mbuf with @flags, packs the message id and service id,
+ * and queues it on the transport.  Returns 0 on success, or a negative
+ * errno from allocation or send.
+ */
+int
+vs_client_core_core_send_service_reset(struct vs_client_core_state *_state,
+				       uint32_t service_id, gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 4UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_client_core *_client =
+	    to_client_driver(vsdrv)->client;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_CORE_CORE_MSG_SERVICE_RESET;
+
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+	    service_id;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			/* NOTE(review): _mbuf is not freed here — confirm the transport owns it on send failure */
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_send_service_reset);
+/*
+ * Transport receive callback for the core client protocol.
+ *
+ * Reads the message id from the front of @_mbuf and dispatches to the
+ * per-message handler; each handler validates the protocol state and
+ * message size and disposes of the mbuf.  Returns the handler's result,
+ * -EBADMSG for a truncated message, or -EPROTO for an unknown message id.
+ */
+static int
+core_handle_message(struct vs_service_device *service, struct vs_mbuf *_mbuf)
+{
+	vs_message_id_t message_id;
+	__maybe_unused struct vs_client_core_state *state =
+	    dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	__maybe_unused struct vs_client_core *client =
+	    to_client_driver(vsdrv)->client;
+
+	int ret;
+
+	/* Extract the message ID */
+	if (VS_MBUF_SIZE(_mbuf) < sizeof(message_id)) {
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Invalid message size %zd\n",
+			__func__, __LINE__, VS_MBUF_SIZE(_mbuf));
+
+		return -EBADMSG;
+	}
+
+	message_id = *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf));
+
+	switch (message_id) {
+
+/** interface core **/
+/* command in sync connect */
+	case VSERVICE_CORE_CORE_ACK_CONNECT:
+		ret = core_core_handle_ack_connect(client, state, _mbuf);
+		break;
+	case VSERVICE_CORE_CORE_NACK_CONNECT:
+		ret = core_core_handle_nack_connect(client, state, _mbuf);
+		break;
+
+/* command in sync disconnect */
+	case VSERVICE_CORE_CORE_ACK_DISCONNECT:
+		ret = core_core_handle_ack_disconnect(client, state, _mbuf);
+		break;
+	case VSERVICE_CORE_CORE_NACK_DISCONNECT:
+		ret = core_core_handle_nack_disconnect(client, state, _mbuf);
+		break;
+
+/* message startup */
+	case VSERVICE_CORE_CORE_MSG_STARTUP:
+		ret = vs_client_core_core_handle_startup(client, state, _mbuf);
+		break;
+
+/* message shutdown */
+	case VSERVICE_CORE_CORE_MSG_SHUTDOWN:
+		ret = vs_client_core_core_handle_shutdown(client, state, _mbuf);
+		break;
+
+/* message service_created */
+	case VSERVICE_CORE_CORE_MSG_SERVICE_CREATED:
+		ret =
+		    vs_client_core_core_handle_service_created(client, state,
+							       _mbuf);
+		break;
+
+/* message service_removed */
+	case VSERVICE_CORE_CORE_MSG_SERVICE_REMOVED:
+		ret =
+		    vs_client_core_core_handle_service_removed(client, state,
+							       _mbuf);
+		break;
+
+/* message server_ready */
+	case VSERVICE_CORE_CORE_MSG_SERVER_READY:
+		ret =
+		    vs_client_core_core_handle_server_ready(client, state,
+							    _mbuf);
+		break;
+
+/* message service_reset */
+	case VSERVICE_CORE_CORE_MSG_SERVICE_RESET:
+		ret =
+		    vs_client_core_core_handle_service_reset(client, state,
+							     _mbuf);
+		break;
+
+	default:
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Unknown message type %d\n",
+			__func__, __LINE__, (int)message_id);
+
+		ret = -EPROTO;
+		break;
+	}
+
+	if (ret) {
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Handler for message type %d returned %d\n",
+			__func__, __LINE__, (int)message_id, ret);
+
+	}
+
+	return ret;
+}
+
+/*
+ * Transport notification callback for the core client protocol.
+ *
+ * Processes each set bit in @notify_bits in turn.  The core protocol
+ * defines no incoming notifications, so any set bit is a protocol error
+ * and is logged; the bit is cleared so the loop always terminates.
+ */
+static void core_handle_notify(struct vs_service_device *service,
+			       uint32_t notify_bits)
+{
+	__maybe_unused struct vs_client_core_state *state =
+	    dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	__maybe_unused struct vs_client_core *client =
+	    to_client_driver(vsdrv)->client;
+
+	uint32_t bits = notify_bits;
+	int ret;
+
+	while (bits) {
+		uint32_t not = __ffs(bits);
+		switch (not) {
+
+    /** interface core **/
+
+		default:
+			dev_err(&state->service->dev,
+				"[%s:%d] Protocol error: Unknown notification %d\n",
+				__func__, __LINE__, (int)not);
+
+			ret = -EPROTO;
+			break;
+
+		}
+		/* Use an unsigned constant: 1 << 31 on a signed int is UB */
+		bits &= ~(1U << not);
+		if (ret) {
+			dev_err(&state->service->dev,
+				"[%s:%d] Protocol error: Handler for notification %d returned %d\n",
+				__func__, __LINE__, (int)not, ret);
+
+		}
+	}
+}
+
+/* Fix typo: "coreClient" -> "core Client" in the module description. */
+MODULE_DESCRIPTION("OKL4 Virtual Services core Client Protocol Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/protocol/core/server.c b/drivers/vservices/protocol/core/server.c
new file mode 100644
index 0000000..c3f3686
--- /dev/null
+++ b/drivers/vservices/protocol/core/server.c
@@ -0,0 +1,1226 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+ /*
+  * This is the generated code for the core server protocol handling.
+  */
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
+#include <linux/export.h>
+#endif
+
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/protocol/core/types.h>
+#include <vservices/protocol/core/common.h>
+#include <vservices/protocol/core/server.h>
+#include <vservices/service.h>
+
+#include "../../transport.h"
+
+/*
+ * Accessor macros for mbuf fields and the service pointer inside a
+ * protocol state.  Arguments are parenthesized so the macros expand
+ * safely when passed non-trivial expressions.
+ */
+#define VS_MBUF_SIZE(mbuf) ((mbuf)->size)
+#define VS_MBUF_DATA(mbuf) ((mbuf)->data)
+#define VS_STATE_SERVICE_PTR(state) ((state)->service)
+
+/*** Linux driver model integration ***/
+/*
+ * Wrapper binding a core-protocol server implementation to the vservices
+ * driver model; allocated in __vservice_core_server_register().
+ */
+struct vs_core_server_driver {
+	struct vs_server_core *server;	/* protocol server callbacks */
+	struct list_head list;
+	struct vs_service_driver vsdrv;	/* embedded driver-model driver */
+};
+
+/* Recover the wrapper from its embedded vs_service_driver. */
+#define to_server_driver(d) \
+        container_of(d, struct vs_core_server_driver, vsdrv)
+
+/*
+ * Driver-model start callback (non-atomic servers): reset the protocol
+ * state machine under the state lock and notify the server implementation.
+ */
+static void core_handle_start(struct vs_service_device *service)
+{
+	struct vs_service_driver *drv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_core *core_server __maybe_unused =
+	    to_server_driver(drv)->server;
+	struct vs_server_core_state *core_state =
+	    dev_get_drvdata(&service->dev);
+
+	vs_service_state_lock(service);
+	core_state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+	if (core_server->start)
+		core_server->start(core_state);
+	vs_service_state_unlock(service);
+}
+
+/*
+ * Driver-model reset callback (non-atomic servers): return the protocol
+ * state machine to its reset state and notify the server implementation.
+ */
+static void core_handle_reset(struct vs_service_device *service)
+{
+	struct vs_service_driver *drv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_core *core_server __maybe_unused =
+	    to_server_driver(drv)->server;
+	struct vs_server_core_state *core_state =
+	    dev_get_drvdata(&service->dev);
+
+	vs_service_state_lock(service);
+	core_state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+	if (core_server->reset)
+		core_server->reset(core_state);
+	vs_service_state_unlock(service);
+}
+
+/*
+ * As core_handle_start(), but uses the bottom-half-safe state lock
+ * variant; installed for servers that declared tx_atomic.
+ */
+static void core_handle_start_bh(struct vs_service_device *service)
+{
+
+	struct vs_server_core_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_core *server __maybe_unused =
+	    to_server_driver(vsdrv)->server;
+
+	vs_service_state_lock_bh(service);
+	state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+	if (server->start)
+		server->start(state);
+	vs_service_state_unlock_bh(service);
+}
+
+/*
+ * As core_handle_reset(), but uses the bottom-half-safe state lock
+ * variant; installed for servers that declared tx_atomic.
+ */
+static void core_handle_reset_bh(struct vs_service_device *service)
+{
+
+	struct vs_server_core_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_core *server __maybe_unused =
+	    to_server_driver(vsdrv)->server;
+
+	vs_service_state_lock_bh(service);
+	state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+	if (server->reset)
+		server->reset(state);
+	vs_service_state_unlock_bh(service);
+}
+
+/* Forward declarations for the driver-model callbacks wired up below. */
+static int core_server_probe(struct vs_service_device *service);
+static int core_server_remove(struct vs_service_device *service);
+static int core_handle_message(struct vs_service_device *service,
+			       struct vs_mbuf *_mbuf);
+static void core_handle_notify(struct vs_service_device *service,
+			       uint32_t flags);
+static void core_handle_start(struct vs_service_device *service);
+static void core_handle_start_bh(struct vs_service_device *service);
+static void core_handle_reset(struct vs_service_device *service);
+static void core_handle_reset_bh(struct vs_service_device *service);
+static int core_handle_tx_ready(struct vs_service_device *service);
+
+/*
+ * Register a core-protocol server with the vservices bus.
+ *
+ * Allocates a driver wrapper, fills in the protocol metadata, quotas and
+ * driver-model callbacks (choosing the _bh lock variants for tx_atomic
+ * servers), and registers it on the server bus.  A tx_atomic server must
+ * also be rx_atomic (-EINVAL otherwise).  Returns 0 or a negative errno;
+ * on failure server->driver is left NULL.
+ */
+int __vservice_core_server_register(struct vs_server_core *server,
+				    const char *name, struct module *owner)
+{
+	int ret;
+	struct vs_core_server_driver *driver;
+
+	if (server->tx_atomic && !server->rx_atomic)
+		return -EINVAL;
+
+	driver = kzalloc(sizeof(*driver), GFP_KERNEL);
+	if (!driver) {
+		ret = -ENOMEM;
+		goto fail_alloc_driver;
+	}
+
+	server->driver = &driver->vsdrv;
+	driver->server = server;
+
+	driver->vsdrv.protocol = VSERVICE_CORE_PROTOCOL_NAME;
+
+	driver->vsdrv.is_server = true;
+	driver->vsdrv.rx_atomic = server->rx_atomic;
+	driver->vsdrv.tx_atomic = server->tx_atomic;
+	/* FIXME Jira ticket SDK-2835 - philipd. */
+	driver->vsdrv.in_quota_min = 1;
+	driver->vsdrv.in_quota_best = server->in_quota_best ?
+	    server->in_quota_best : driver->vsdrv.in_quota_min;
+	/* FIXME Jira ticket SDK-2835 - philipd. */
+	driver->vsdrv.out_quota_min = 1;
+	driver->vsdrv.out_quota_best = server->out_quota_best ?
+	    server->out_quota_best : driver->vsdrv.out_quota_min;
+	driver->vsdrv.in_notify_count = VSERVICE_CORE_NBIT_IN__COUNT;
+	driver->vsdrv.out_notify_count = VSERVICE_CORE_NBIT_OUT__COUNT;
+
+	driver->vsdrv.probe = core_server_probe;
+	driver->vsdrv.remove = core_server_remove;
+	driver->vsdrv.receive = core_handle_message;
+	driver->vsdrv.notify = core_handle_notify;
+	driver->vsdrv.start = server->tx_atomic ?
+	    core_handle_start_bh : core_handle_start;
+	driver->vsdrv.reset = server->tx_atomic ?
+	    core_handle_reset_bh : core_handle_reset;
+	driver->vsdrv.tx_ready = core_handle_tx_ready;
+	/*
+	 * NOTE(review): these zero assignments overwrite the
+	 * NBIT_IN/OUT__COUNT values set above.  Harmless if both counts are
+	 * zero for the core protocol, otherwise a bug — confirm against the
+	 * generated protocol definition.
+	 */
+	driver->vsdrv.out_notify_count = 0;
+	driver->vsdrv.in_notify_count = 0;
+	driver->vsdrv.driver.name = name;
+	driver->vsdrv.driver.owner = owner;
+	driver->vsdrv.driver.bus = &vs_server_bus_type;
+
+	ret = driver_register(&driver->vsdrv.driver);
+
+	if (ret) {
+		goto fail_driver_register;
+	}
+
+	return 0;
+
+ fail_driver_register:
+	server->driver = NULL;
+	kfree(driver);
+ fail_alloc_driver:
+	return ret;
+}
+
+EXPORT_SYMBOL(__vservice_core_server_register);
+
+/*
+ * Unregister a core-protocol server and free the driver wrapper allocated
+ * by __vservice_core_server_register().  Safe to call when the server was
+ * never registered.  Always returns 0.
+ */
+int vservice_core_server_unregister(struct vs_server_core *server)
+{
+	struct vs_core_server_driver *drv;
+
+	if (!server->driver)
+		return 0;	/* not currently registered */
+
+	drv = to_server_driver(server->driver);
+	driver_unregister(&drv->vsdrv.driver);
+	server->driver = NULL;
+	kfree(drv);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vservice_core_server_unregister);
+
+/*
+ * Driver-model probe: ask the server implementation to allocate its
+ * per-service state, take a reference on the service, initialise the
+ * protocol state machine, and stash the state as drvdata.
+ * Returns 0, -ENOMEM, or the errno encoded in the alloc() ERR_PTR.
+ */
+static int core_server_probe(struct vs_service_device *service)
+{
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_core *server = to_server_driver(vsdrv)->server;
+	struct vs_server_core_state *state;
+
+	state = server->alloc(service);
+	if (!state)
+		return -ENOMEM;
+	else if (IS_ERR(state))
+		return PTR_ERR(state);
+
+	state->service = vs_get_service(service);
+	state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+
+	dev_set_drvdata(&service->dev, state);
+
+	return 0;
+}
+
+/*
+ * Driver-model remove: mark the state released, hand it back to the
+ * server's release() callback, and drop the reference taken in probe.
+ */
+static int core_server_remove(struct vs_service_device *service)
+{
+	struct vs_server_core_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_core *server = to_server_driver(vsdrv)->server;
+
+	state->released = true;
+	dev_set_drvdata(&service->dev, NULL);
+	server->release(state);
+
+	vs_put_service(service);
+
+	return 0;
+}
+
+/* Transport tx-ready callback: forward to the server's tx_ready hook. */
+static int core_handle_tx_ready(struct vs_service_device *service)
+{
+	struct vs_server_core_state *core_state =
+	    dev_get_drvdata(&service->dev);
+	struct vs_service_driver *drv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_core *core_server = to_server_driver(drv)->server;
+
+	if (core_server->tx_ready)
+		core_server->tx_ready(core_state);
+
+	return 0;
+}
+
+/*
+ * Allocate an mbuf for a "service_created" message and set up the caller's
+ * vs_string views to point at the name fields inside the payload.  The
+ * message id is written; the caller fills the strings and service id, then
+ * sends via vs_server_core_core_send_service_created().
+ */
+struct vs_mbuf *vs_server_core_core_alloc_service_created(struct
+							  vs_server_core_state
+							  *_state,
+							  struct vs_string
+							  *service_name,
+							  struct vs_string
+							  *protocol_name,
+							  gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+	const vs_message_id_t _msg_id = VSERVICE_CORE_CORE_MSG_SERVICE_CREATED;
+	const uint32_t _msg_size =
+	    sizeof(vs_message_id_t) + VSERVICE_CORE_SERVICE_NAME_SIZE +
+	    VSERVICE_CORE_PROTOCOL_NAME_SIZE + 4UL;
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return _mbuf;
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+		return ERR_PTR(-ENOMEM);
+	}
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) = _msg_id;
+
+	if (!service_name)
+		goto fail;
+	service_name->ptr =
+	    (char *)(VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+	service_name->max_size = VSERVICE_CORE_SERVICE_NAME_SIZE;
+	if (!protocol_name)
+		goto fail;
+	protocol_name->ptr =
+	    (char *)(VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+		     VSERVICE_CORE_SERVICE_NAME_SIZE + 4UL);
+	protocol_name->max_size = VSERVICE_CORE_PROTOCOL_NAME_SIZE;
+
+	return _mbuf;
+
+ fail:
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	/* NOTE(review): returns NULL here but ERR_PTR() above — callers must check both; confirm intended */
+	return NULL;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_alloc_service_created);
+/*
+ * Free an unsent "service_created" mbuf.  service_name/protocol_name are
+ * accepted for API symmetry with the alloc function; they point into the
+ * mbuf and need no separate cleanup.  Always returns 0.
+ */
+int vs_server_core_core_free_service_created(struct vs_server_core_state
+					     *_state,
+					     struct vs_string *service_name,
+					     struct vs_string *protocol_name,
+					     struct vs_mbuf *_mbuf)
+{
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_free_service_created);
+/*
+ * Acknowledge a client connect request.  Only legal in the
+ * DISCONNECTED__CONNECT state (-EPROTO otherwise).  On a successful send
+ * the state machine moves to CONNECTED and the server's state_change
+ * callback (if any) is invoked.  Returns 0 or a negative errno.
+ */
+int
+vs_server_core_core_send_ack_connect(struct vs_server_core_state *_state,
+				     gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_core *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_DISCONNECTED__CONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_CORE_CORE_ACK_CONNECT;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.core.statenum = VSERVICE_CORE_STATE_CONNECTED;
+
+	if (_server->core.state_change)
+		_server->core.state_change(_state,
+					   VSERVICE_CORE_STATE_DISCONNECTED__CONNECT,
+					   VSERVICE_CORE_STATE_CONNECTED);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_ack_connect);
+/*
+ * Reject a client connect request.  Only legal in the
+ * DISCONNECTED__CONNECT state (-EPROTO otherwise).  On a successful send
+ * the state machine returns to DISCONNECTED and the server's state_change
+ * callback (if any) is invoked.  Returns 0 or a negative errno.
+ */
+int
+vs_server_core_core_send_nack_connect(struct vs_server_core_state *_state,
+				      gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_core *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_DISCONNECTED__CONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_CORE_CORE_NACK_CONNECT;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.core.statenum = VSERVICE_CORE_STATE_DISCONNECTED;
+
+	if (_server->core.state_change)
+		_server->core.state_change(_state,
+					   VSERVICE_CORE_STATE_DISCONNECTED__CONNECT,
+					   VSERVICE_CORE_STATE_DISCONNECTED);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_nack_connect);
+/*
+ * Acknowledge a client disconnect request.  Only legal in the
+ * CONNECTED__DISCONNECT state (-EPROTO otherwise).  On a successful send
+ * the state machine moves to DISCONNECTED and the server's state_change
+ * callback (if any) is invoked.  Returns 0 or a negative errno.
+ */
+int
+vs_server_core_core_send_ack_disconnect(struct vs_server_core_state *_state,
+					gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_core *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_CORE_CORE_ACK_DISCONNECT;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.core.statenum = VSERVICE_CORE_STATE_DISCONNECTED;
+
+	if (_server->core.state_change)
+		_server->core.state_change(_state,
+					   VSERVICE_CORE_STATE_CONNECTED__DISCONNECT,
+					   VSERVICE_CORE_STATE_DISCONNECTED);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_ack_disconnect);
+/*
+ * Reject a client disconnect request.  Only legal in the
+ * CONNECTED__DISCONNECT state (-EPROTO otherwise).  On a successful send
+ * the state machine returns to CONNECTED and the server's state_change
+ * callback (if any) is invoked.  Returns 0 or a negative errno.
+ */
+int
+vs_server_core_core_send_nack_disconnect(struct vs_server_core_state *_state,
+					 gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_core *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_CORE_CORE_NACK_DISCONNECT;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.core.statenum = VSERVICE_CORE_STATE_CONNECTED;
+
+	if (_server->core.state_change)
+		_server->core.state_change(_state,
+					   VSERVICE_CORE_STATE_CONNECTED__DISCONNECT,
+					   VSERVICE_CORE_STATE_CONNECTED);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_nack_disconnect);
+/*
+ * Handle a client "connect" request.  Only legal in the DISCONNECTED
+ * state (-EPROTO otherwise); moves the state machine to
+ * DISCONNECTED__CONNECT, frees the mbuf, and invokes the server's
+ * req_connect handler (which must later ack or nack).  -EBADMSG on a
+ * truncated message.
+ */
+static int
+vs_server_core_core_handle_req_connect(const struct vs_server_core *_server,
+				       struct vs_server_core_state *_state,
+				       struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_DISCONNECTED:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+	_state->state.core.statenum = VSERVICE_CORE_STATE_DISCONNECTED__CONNECT;
+
+	if (_server->core.state_change)
+		_server->core.state_change(_state,
+					   VSERVICE_CORE_STATE_DISCONNECTED,
+					   VSERVICE_CORE_STATE_DISCONNECTED__CONNECT);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_server->core.req_connect)
+		return _server->core.req_connect(_state);
+	else
+		dev_warn(&_state->service->dev,
+			 "[%s:%d] Protocol warning: No handler registered for _server->core.req_connect, command will never be acknowledged\n",
+			 __func__, __LINE__);
+	return 0;
+}
+
+/* NOTE(review): EXPORT_SYMBOL on a static function is ineffective — confirm whether it should be dropped. */
+EXPORT_SYMBOL(vs_server_core_core_handle_req_connect);
+/*
+ * Handle a client "disconnect" request.  Only legal in the CONNECTED
+ * state (-EPROTO otherwise); moves the state machine to
+ * CONNECTED__DISCONNECT, frees the mbuf, and invokes the server's
+ * req_disconnect handler (which must later ack or nack).  -EBADMSG on a
+ * truncated message.
+ */
+static int
+vs_server_core_core_handle_req_disconnect(const struct vs_server_core *_server,
+					  struct vs_server_core_state *_state,
+					  struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+	_state->state.core.statenum = VSERVICE_CORE_STATE_CONNECTED__DISCONNECT;
+
+	if (_server->core.state_change)
+		_server->core.state_change(_state,
+					   VSERVICE_CORE_STATE_CONNECTED,
+					   VSERVICE_CORE_STATE_CONNECTED__DISCONNECT);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_server->core.req_disconnect)
+		return _server->core.req_disconnect(_state);
+	else
+		dev_warn(&_state->service->dev,
+			 "[%s:%d] Protocol warning: No handler registered for _server->core.req_disconnect, command will never be acknowledged\n",
+			 __func__, __LINE__);
+	return 0;
+}
+
+/* NOTE(review): EXPORT_SYMBOL on a static function is ineffective — confirm whether it should be dropped. */
+EXPORT_SYMBOL(vs_server_core_core_handle_req_disconnect);
+/*
+ * Send the core "startup" message advertising the core service's in/out
+ * quotas.  Only legal in the OFFLINE state (-EPROTO otherwise).  On a
+ * successful send the state machine moves to DISCONNECTED and the
+ * server's state_change callback (if any) is invoked.
+ * Returns 0 or a negative errno.
+ */
+int
+vs_server_core_core_send_startup(struct vs_server_core_state *_state,
+				 uint32_t core_in_quota,
+				 uint32_t core_out_quota, gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 8UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_core *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_OFFLINE:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_CORE_CORE_MSG_STARTUP;
+
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+	    core_in_quota;
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL) =
+	    core_out_quota;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.core.statenum = VSERVICE_CORE_STATE_DISCONNECTED;
+
+	if (_server->core.state_change)
+		_server->core.state_change(_state, VSERVICE_CORE_STATE_OFFLINE,
+					   VSERVICE_CORE_STATE_DISCONNECTED);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_startup);
+/*
+ * Send the core "shutdown" message.  Legal in any online state
+ * (DISCONNECTED, DISCONNECTED__CONNECT, CONNECTED, CONNECTED__DISCONNECT;
+ * -EPROTO otherwise).  After a successful send, DISCONNECTED and
+ * CONNECTED transition to OFFLINE (with a state_change callback);
+ * in-flight __CONNECT/__DISCONNECT states are left for the pending
+ * ack/nack to resolve.  Returns 0 or a negative errno.
+ */
+int
+vs_server_core_core_send_shutdown(struct vs_server_core_state *_state,
+				  gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_core *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_DISCONNECTED:
+	case VSERVICE_CORE_STATE_DISCONNECTED__CONNECT:
+	case VSERVICE_CORE_STATE_CONNECTED:
+	case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_CORE_CORE_MSG_SHUTDOWN;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_DISCONNECTED:
+		_state->state.core.statenum = VSERVICE_CORE_STATE_OFFLINE;
+
+		if (_server->core.state_change)
+			_server->core.state_change(_state,
+						   VSERVICE_CORE_STATE_DISCONNECTED,
+						   VSERVICE_CORE_STATE_OFFLINE);
+		break;
+	case VSERVICE_CORE_STATE_CONNECTED:
+		_state->state.core.statenum = VSERVICE_CORE_STATE_OFFLINE;
+
+		if (_server->core.state_change)
+			_server->core.state_change(_state,
+						   VSERVICE_CORE_STATE_CONNECTED,
+						   VSERVICE_CORE_STATE_OFFLINE);
+		break;
+
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_shutdown);
+/**
+ * vs_server_core_core_send_service_created - announce a newly created
+ * service to the client
+ * @_state: core service protocol state
+ * @service_id: id of the new service
+ * @service_name: name of the new service (fixed-size wire field)
+ * @protocol_name: protocol of the new service (trailing variable field)
+ * @_mbuf: caller-provided message buffer, already stamped with the
+ *         service_created message id
+ *
+ * Returns 0 on success, -EPROTO in an invalid protocol state, -EINVAL
+ * if the buffer is mis-stamped or too small, or a send errno.
+ */
+int
+vs_server_core_core_send_service_created(struct vs_server_core_state *_state,
+					 uint32_t service_id,
+					 struct vs_string service_name,
+					 struct vs_string protocol_name,
+					 struct vs_mbuf *_mbuf)
+{
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_core *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED:
+	case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+	/* The mbuf must already carry the service_created message id. */
+	if (*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) !=
+	    VSERVICE_CORE_CORE_MSG_SERVICE_CREATED)
+
+		return -EINVAL;
+
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+	    service_id;
+	/* Zero-pad the fixed-size service_name field in place. */
+	{
+		size_t _size = strnlen(service_name.ptr, service_name.max_size);
+		if ((_size + sizeof(vs_message_id_t) + 4UL) >
+		    VS_MBUF_SIZE(_mbuf))
+			return -EINVAL;
+
+		memset(service_name.ptr + _size, 0,
+		       service_name.max_size - _size);
+	}
+	/* protocol_name is the trailing field: trim the mbuf to its length. */
+	{
+		size_t _size =
+		    strnlen(protocol_name.ptr, protocol_name.max_size);
+		if ((_size + sizeof(vs_message_id_t) +
+		     VSERVICE_CORE_SERVICE_NAME_SIZE + 4UL) >
+		    VS_MBUF_SIZE(_mbuf))
+			return -EINVAL;
+
+		if (_size < protocol_name.max_size)
+			VS_MBUF_SIZE(_mbuf) -= (protocol_name.max_size - _size);
+
+	}
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_service_created);
+/**
+ * vs_server_core_core_send_service_removed - tell the client a service
+ * has been removed
+ * @_state: core service protocol state
+ * @service_id: id of the removed service
+ * @flags: GFP allocation flags for the message buffer
+ *
+ * Returns 0 on success, -EPROTO in an invalid protocol state, or a
+ * negative errno from mbuf allocation or the transport send.
+ */
+int
+vs_server_core_core_send_service_removed(struct vs_server_core_state *_state,
+					 uint32_t service_id, gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	/* Payload: message id followed by the 32-bit service id. */
+	const size_t _msg_size = sizeof(vs_message_id_t) + 4UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_core *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED:
+	case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_CORE_CORE_MSG_SERVICE_REMOVED;
+
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+	    service_id;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_service_removed);
+/**
+ * vs_server_core_core_send_server_ready - tell the client that a service
+ * is ready on the server side
+ * @_state: core service protocol state
+ * @service_id: id of the ready service
+ * @in_quota / @out_quota: message quotas for each direction
+ * @in_bit_offset / @in_num_bits: incoming notification bit allocation
+ * @out_bit_offset / @out_num_bits: outgoing notification bit allocation
+ * @flags: GFP allocation flags for the message buffer
+ *
+ * Returns 0 on success, -EPROTO in an invalid protocol state, or a
+ * negative errno from mbuf allocation or the transport send.
+ */
+int
+vs_server_core_core_send_server_ready(struct vs_server_core_state *_state,
+				      uint32_t service_id, uint32_t in_quota,
+				      uint32_t out_quota,
+				      uint32_t in_bit_offset,
+				      uint32_t in_num_bits,
+				      uint32_t out_bit_offset,
+				      uint32_t out_num_bits, gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	/* Payload: message id plus seven 32-bit fields (28 bytes). */
+	const size_t _msg_size = sizeof(vs_message_id_t) + 28UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_core *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED:
+	case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_CORE_CORE_MSG_SERVER_READY;
+
+	/* Marshal the fields in wire order at fixed offsets. */
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+	    service_id;
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL) =
+	    in_quota;
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 8UL) =
+	    out_quota;
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 12UL) =
+	    in_bit_offset;
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 16UL) =
+	    in_num_bits;
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 20UL) =
+	    out_bit_offset;
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 24UL) =
+	    out_num_bits;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_server_ready);
+/**
+ * vs_server_core_core_send_service_reset - notify the client that a
+ * service has been reset
+ * @_state: core service protocol state
+ * @service_id: id of the service being reset
+ * @flags: GFP allocation flags for the message buffer
+ *
+ * Only valid in the CONNECTED state. Returns 0 on success, -EPROTO in
+ * an invalid state, or a negative errno from allocation or send.
+ */
+int
+vs_server_core_core_send_service_reset(struct vs_server_core_state *_state,
+				       uint32_t service_id, gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	/* Payload: message id followed by the 32-bit service id. */
+	const size_t _msg_size = sizeof(vs_message_id_t) + 4UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_core *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_CORE_CORE_MSG_SERVICE_RESET;
+
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+	    service_id;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_service_reset);
+/*
+ * vs_server_core_core_handle_service_reset - handle an incoming
+ * "service_reset" message from the client
+ *
+ * Validates the protocol state and message size, decodes the service
+ * id, frees the message buffer, then forwards the reset to the server
+ * driver's msg_service_reset handler if one is installed.
+ *
+ * Returns 0 on success, -EPROTO in a bad protocol state, -EBADMSG for
+ * a short message, or the handler's return value.
+ */
+static int
+vs_server_core_core_handle_service_reset(const struct vs_server_core *_server,
+					 struct vs_server_core_state *_state,
+					 struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 4UL;
+	uint32_t service_id;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	service_id =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_server->core.msg_service_reset)
+		return _server->core.msg_service_reset(_state, service_id);
+	return 0;
+}
+
+/*
+ * core_handle_message - dispatch one incoming core-protocol message
+ * @service: service the message arrived on
+ * @_mbuf: received message buffer
+ *
+ * Reads the message id from the first word of the buffer and routes the
+ * message to the matching generated handler. Returns the handler's
+ * result, -EBADMSG for a truncated message, or -EPROTO for an unknown
+ * message id.
+ */
+static int
+core_handle_message(struct vs_service_device *service, struct vs_mbuf *_mbuf)
+{
+	vs_message_id_t message_id;
+	__maybe_unused struct vs_server_core_state *state =
+	    dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	__maybe_unused struct vs_server_core *server =
+	    to_server_driver(vsdrv)->server;
+
+	int ret;
+
+	/* Extract the message ID */
+	if (VS_MBUF_SIZE(_mbuf) < sizeof(message_id)) {
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Invalid message size %zd\n",
+			__func__, __LINE__, VS_MBUF_SIZE(_mbuf));
+
+		return -EBADMSG;
+	}
+
+	message_id = *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf));
+
+	switch (message_id) {
+
+/** interface core **/
+/* command in sync connect */
+	case VSERVICE_CORE_CORE_REQ_CONNECT:
+		ret =
+		    vs_server_core_core_handle_req_connect(server, state,
+							   _mbuf);
+		break;
+
+/* command in sync disconnect */
+	case VSERVICE_CORE_CORE_REQ_DISCONNECT:
+		ret =
+		    vs_server_core_core_handle_req_disconnect(server, state,
+							      _mbuf);
+		break;
+
+/* message service_reset */
+	case VSERVICE_CORE_CORE_MSG_SERVICE_RESET:
+		ret =
+		    vs_server_core_core_handle_service_reset(server, state,
+							     _mbuf);
+		break;
+
+	default:
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Unknown message type %d\n",
+			__func__, __LINE__, (int)message_id);
+
+		ret = -EPROTO;
+		break;
+	}
+
+	if (ret) {
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Handler for message type %d returned %d\n",
+			__func__, __LINE__, (int)message_id, ret);
+
+	}
+
+	return ret;
+}
+
+/*
+ * core_handle_notify - process incoming asynchronous notification bits
+ * @service: service that received the notification
+ * @notify_bits: bitmask of pending notifications
+ *
+ * The core interface currently defines no incoming notifications, so
+ * every set bit is reported as a protocol error. Each bit is cleared
+ * after handling so the loop always terminates.
+ */
+static void core_handle_notify(struct vs_service_device *service,
+			       uint32_t notify_bits)
+{
+	__maybe_unused struct vs_server_core_state *state =
+	    dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	__maybe_unused struct vs_server_core *server =
+	    to_server_driver(vsdrv)->server;
+
+	uint32_t bits = notify_bits;
+	int ret;
+
+	while (bits) {
+		uint32_t not = __ffs(bits);
+		switch (not) {
+
+    /** interface core **/
+
+		default:
+			dev_err(&state->service->dev,
+				"[%s:%d] Protocol error: Unknown notification %d\n",
+				__func__, __LINE__, (int)not);
+
+			ret = -EPROTO;
+			break;
+
+		}
+		/* Unsigned shift: bit 31 would overflow a signed 1 << not. */
+		bits &= ~(1UL << not);
+		if (ret) {
+			dev_err(&state->service->dev,
+				"[%s:%d] Protocol error: Handler for notification %d returned %d\n",
+				__func__, __LINE__, (int)not, ret);
+
+		}
+	}
+}
+
+/* Fix missing space in the module description ("coreServer"). */
+MODULE_DESCRIPTION("OKL4 Virtual Services core Server Protocol Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/session.c b/drivers/vservices/session.c
new file mode 100644
index 0000000..d695184
--- /dev/null
+++ b/drivers/vservices/session.c
@@ -0,0 +1,2913 @@
+/*
+ * drivers/vservices/session.c
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This is the generic session-management code for the vServices framework.
+ * It creates service and session devices on request from session and
+ * transport drivers, respectively; it also queues incoming messages from the
+ * transport and distributes them to the session's services.
+ */
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/notifier.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/kdev_t.h>
+#include <linux/err.h>
+
+#include <vservices/transport.h>
+#include <vservices/session.h>
+#include <vservices/service.h>
+
+#include "session.h"
+#include "transport.h"
+#include "compat.h"
+
+/* Minimum required time between resets to avoid throttling */
+#define RESET_THROTTLE_TIME msecs_to_jiffies(1000)
+
+/*
+ * Minimum/maximum reset throttling time. The reset throttle will start at
+ * the minimum and increase to the maximum exponentially.
+ */
+#define RESET_THROTTLE_MIN RESET_THROTTLE_TIME
+#define RESET_THROTTLE_MAX msecs_to_jiffies(8 * 1000)
+
+/*
+ * If the reset is being throttled and a sane reset (doesn't need throttling)
+ * is requested, then if the service's reset delay multiplied by this value
+ * has elapsed throttling is disabled.
+ */
+#define RESET_THROTTLE_COOL_OFF_MULT 2
+
+/* IDR of session ids to sessions */
+static DEFINE_IDR(session_idr);
+/* Protects session_idr; exported for use by other vservices modules */
+DEFINE_MUTEX(vs_session_lock);
+EXPORT_SYMBOL_GPL(vs_session_lock);
+
+/* Notifier list for vService session events */
+static BLOCKING_NOTIFIER_HEAD(vs_session_notifier_list);
+
+/* Default debug mask; writable at runtime via the module parameter (0644) */
+static unsigned long default_debug_mask;
+module_param(default_debug_mask, ulong, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(default_debug_mask, "Default vServices debug mask");
+
+/* vServices root in sysfs at /sys/vservices */
+struct kobject *vservices_root;
+EXPORT_SYMBOL_GPL(vservices_root);
+
+/* vServices server root in sysfs at /sys/vservices/server-sessions */
+struct kobject *vservices_server_root;
+EXPORT_SYMBOL_GPL(vservices_server_root);
+
+/* vServices client root in sysfs at /sys/vservices/client-sessions */
+struct kobject *vservices_client_root;
+EXPORT_SYMBOL_GPL(vservices_client_root);
+
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+/*
+ * vs_service_lookup_by_devt - find the service device for a char device
+ * @dev: device number; the minor encodes session id * VS_MAX_SERVICES
+ *       plus the service id
+ *
+ * Returns the service (as returned by vs_session_get_service), or NULL
+ * if no session exists for the encoded session id.
+ */
+struct vs_service_device *vs_service_lookup_by_devt(dev_t dev)
+{
+	struct vs_session_device *session;
+	struct vs_service_device *service;
+
+	mutex_lock(&vs_session_lock);
+	session = idr_find(&session_idr, MINOR(dev) / VS_MAX_SERVICES);
+	if (!session) {
+		/* idr_find() returns NULL for unknown ids; don't deref it. */
+		mutex_unlock(&vs_session_lock);
+		return NULL;
+	}
+	get_device(&session->dev);
+	mutex_unlock(&vs_session_lock);
+
+	service = vs_session_get_service(session,
+			MINOR(dev) % VS_MAX_SERVICES);
+	put_device(&session->dev);
+
+	return service;
+}
+#endif
+
+/* Context passed through idr_for_each() by vs_session_for_each_locked() */
+struct vs_session_for_each_data {
+	int (*fn)(struct vs_session_device *session, void *data);
+	void *data;
+};
+
+/*
+ * idr_for_each() callback: unwraps the context and invokes the caller's
+ * function on the session. The id argument is unused.
+ */
+int vs_session_for_each_from_idr(int id, void *session, void *_data)
+{
+	struct vs_session_for_each_data *data =
+		(struct vs_session_for_each_data *)_data;
+	return data->fn(session, data->data);
+}
+
+/**
+ * vs_session_for_each_locked - call a callback function for each session
+ * @fn: function to call
+ * @data: opaque pointer that is passed through to the function
+ *
+ * The caller must hold vs_session_lock. Iteration stops early if @fn
+ * returns nonzero, and that value is returned.
+ */
+int vs_session_for_each_locked(
+		int (*fn)(struct vs_session_device *session, void *data),
+		void *data)
+{
+	struct vs_session_for_each_data priv = { .fn = fn, .data = data };
+
+	lockdep_assert_held(&vs_session_lock);
+
+	return idr_for_each(&session_idr, vs_session_for_each_from_idr,
+			&priv);
+}
+EXPORT_SYMBOL(vs_session_for_each_locked);
+
+/**
+ * vs_session_register_notify - register a notifier callback for vServices
+ * session events
+ * @nb: pointer to the notifier block for the callback events.
+ */
+void vs_session_register_notify(struct notifier_block *nb)
+{
+	blocking_notifier_chain_register(&vs_session_notifier_list, nb);
+}
+EXPORT_SYMBOL(vs_session_register_notify);
+
+/**
+ * vs_session_unregister_notify - unregister a notifier callback for
+ * vServices session events
+ * @nb: pointer to the notifier block for the callback events.
+ */
+void vs_session_unregister_notify(struct notifier_block *nb)
+{
+	blocking_notifier_chain_unregister(&vs_session_notifier_list, nb);
+}
+EXPORT_SYMBOL(vs_session_unregister_notify);
+
+/*
+ * Helper returning how long ago (in milliseconds) the given jiffies
+ * timestamp occurred.
+ * Marked as __maybe_unused since this is only needed when
+ * CONFIG_VSERVICES_DEBUG is enabled, but cannot be removed because it
+ * would otherwise cause unused-function warnings/errors in that build.
+ */
+static __maybe_unused unsigned msecs_ago(unsigned long jiffy_value)
+{
+	return jiffies_to_msecs(jiffies - jiffy_value);
+}
+
+/* Worker for session_fatal_error(): resets the session's transport. */
+static void session_fatal_error_work(struct work_struct *work)
+{
+	struct vs_session_device *session = container_of(work,
+			struct vs_session_device, fatal_error_work);
+
+	session->transport->vt->reset(session->transport);
+}
+
+/*
+ * Schedule a transport reset in response to an unrecoverable session
+ * error. Safe in atomic context (only schedules work). The gfp
+ * argument is currently unused.
+ */
+static void session_fatal_error(struct vs_session_device *session, gfp_t gfp)
+{
+	schedule_work(&session->fatal_error_work);
+}
+
+/*
+ * Service readiness state machine
+ *
+ * The states are:
+ *
+ * INIT: Initial state. Service may not be completely configured yet
+ * (typically because the protocol hasn't been set); call vs_service_start
+ * once configuration is complete. The disable count must be nonzero, and
+ * must never reach zero in this state.
+ * DISABLED: Service is not permitted to communicate. Non-core services are
+ * in this state whenever the core protocol and/or transport state does not
+ * allow them to be active; core services are only in this state transiently.
+ * The disable count must be nonzero; when it reaches zero, the service
+ * transitions to RESET state.
+ * RESET: Service drivers are inactive at both ends, but the core service
+ * state allows the service to become active. The session will schedule a
+ * future transition to READY state when entering this state, but the
+ * transition may be delayed to throttle the rate at which resets occur.
+ * READY: All core-service and session-layer policy allows the service to
+ * communicate; it will become active as soon as it has a protocol driver.
+ * ACTIVE: The driver is present and communicating.
+ * LOCAL_RESET: We have initiated a reset at this end, but the remote end has
+ * not yet acknowledged it. We will enter the RESET state on receiving
+ * acknowledgement, unless the disable count is nonzero in which case we
+ * will enter DISABLED state.
+ * LOCAL_DELETE: As for LOCAL_RESET, but we will enter the DELETED state
+ * instead of RESET or DISABLED.
+ * DELETED: The service is no longer present on the session; the service
+ * device structure may still exist because something is holding a reference
+ * to it.
+ *
+ * The permitted transitions are:
+ *
+ * From          To            Trigger
+ * INIT          DISABLED      vs_service_start
+ * DISABLED      RESET         vs_service_enable (disable_count -> 0)
+ * RESET         READY         End of throttle delay (may be 0)
+ * READY         ACTIVE        Latter of probe() and entering READY
+ * {READY, ACTIVE}
+ *               LOCAL_RESET   vs_service_reset
+ * {READY, ACTIVE, LOCAL_RESET}
+ *               RESET         vs_service_handle_reset (server)
+ * RESET         DISABLED      vs_service_disable (server)
+ * {READY, ACTIVE, LOCAL_RESET}
+ *               DISABLED      vs_service_handle_reset (client)
+ * {INIT, RESET, READY, ACTIVE, LOCAL_RESET}
+ *               DISABLED      vs_service_disable_noncore
+ * {ACTIVE, LOCAL_RESET}
+ *               LOCAL_DELETE  vs_service_delete
+ * {INIT, DISABLED, RESET, READY}
+ *               DELETED       vs_service_delete
+ * LOCAL_DELETE  DELETED       vs_service_handle_reset
+ *                             vs_service_disable_noncore
+ *
+ * See the documentation for the triggers for details.
+ */
+
+/* Service readiness states; see the state machine description above. */
+enum vs_service_readiness {
+	VS_SERVICE_INIT,		/* not yet fully configured */
+	VS_SERVICE_DISABLED,		/* communication not permitted */
+	VS_SERVICE_RESET,		/* inactive; transition to READY queued */
+	VS_SERVICE_READY,		/* may start once a driver is bound */
+	VS_SERVICE_ACTIVE,		/* driver present and communicating */
+	VS_SERVICE_LOCAL_RESET,		/* local reset, awaiting remote ack */
+	VS_SERVICE_LOCAL_DELETE,	/* local delete, awaiting remote ack */
+	VS_SERVICE_DELETED,		/* no longer present on the session */
+};
+
+/* Session activation states. */
+enum {
+	VS_SESSION_RESET,
+	VS_SESSION_ACTIVATE,
+	VS_SESSION_ACTIVE,
+};
+
+/**
+ * vs_service_start - Start a service by moving it from the init state to the
+ * disabled state.
+ *
+ * @service: The service to start.
+ *
+ * The service's protocol must already be configured. Returns true if
+ * the service was started, or false if it was not (wrong state, or the
+ * session driver rejected the service).
+ */
+bool vs_service_start(struct vs_service_device *service)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+	struct vs_session_driver *session_drv =
+			to_vs_session_driver(session->dev.driver);
+
+	/* The protocol must be set before starting the service. */
+	WARN_ON(!service->protocol);
+
+	mutex_lock_nested(&service->ready_lock, service->lock_subclass);
+
+	if (service->readiness != VS_SERVICE_INIT) {
+		if (service->readiness != VS_SERVICE_DELETED)
+			dev_err(&service->dev,
+					"start called from invalid state %d\n",
+					service->readiness);
+		mutex_unlock(&service->ready_lock);
+		return false;
+	}
+
+	/* Notify the session driver; skipped for the core service (id 0). */
+	if (service->id != 0 && session_drv->service_added) {
+		int err = session_drv->service_added(session, service);
+		if (err < 0) {
+			dev_err(&session->dev, "Failed to add service %d: %d\n",
+					service->id, err);
+			mutex_unlock(&service->ready_lock);
+			return false;
+		}
+	}
+
+	service->readiness = VS_SERVICE_DISABLED;
+	service->disable_count = 1;
+	service->last_reset_request = jiffies;
+
+	mutex_unlock(&service->ready_lock);
+
+	/* Tell userspace about the service. */
+	dev_set_uevent_suppress(&service->dev, false);
+	kobject_uevent(&service->dev.kobj, KOBJ_ADD);
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(vs_service_start);
+
+static void cancel_pending_rx(struct vs_service_device *service);
+static void queue_ready_work(struct vs_service_device *service);
+
+/*
+ * __try_start_service - activate a READY service if a driver is bound
+ * @service: service to try to start
+ *
+ * Prepares the transport, marks the service ACTIVE, and invokes the
+ * service driver's and session driver's start callbacks. If the
+ * transport fails to start the service, it is returned to RESET and a
+ * retry is queued. Does nothing if the service is not READY or has no
+ * probed driver.
+ *
+ * The caller must hold the service's ready_lock.
+ */
+static void __try_start_service(struct vs_service_device *service)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+	struct vs_session_driver *session_drv =
+			to_vs_session_driver(session->dev.driver);
+	struct vs_transport *transport;
+	int err;
+	struct vs_service_driver *driver;
+
+	lockdep_assert_held(&service->ready_lock);
+
+	/* We can't start if the service is not ready yet. */
+	if (service->readiness != VS_SERVICE_READY)
+		return;
+
+	/*
+	 * There should never be anything in the RX queue at this point.
+	 * If there is, it can seriously confuse the service drivers for
+	 * no obvious reason, so we check.
+	 */
+	if (WARN_ON(!list_empty(&service->rx_queue)))
+		cancel_pending_rx(service);
+
+	if (!service->driver_probed) {
+		vs_dev_debug(VS_DEBUG_SESSION, session, &service->dev,
+				"ready with no driver\n");
+		return;
+	}
+
+	/* Prepare the transport to support the service. */
+	transport = session->transport;
+	err = transport->vt->service_start(transport, service);
+
+	if (err < 0) {
+		/* fatal error attempting to start; reset and try again */
+		service->readiness = VS_SERVICE_RESET;
+		service->last_reset_request = jiffies;
+		service->last_reset = jiffies;
+		queue_ready_work(service);
+
+		return;
+	}
+
+	service->readiness = VS_SERVICE_ACTIVE;
+
+	driver = to_vs_service_driver(service->dev.driver);
+	if (driver->start)
+		driver->start(service);
+
+	/* Session driver start callback; skipped for the core service. */
+	if (service->id && session_drv->service_start) {
+		err = session_drv->service_start(session, service);
+		if (err < 0) {
+			dev_err(&session->dev, "Failed to start service %s (%d): %d\n",
+					dev_name(&service->dev),
+					service->id, err);
+			session_fatal_error(session, GFP_KERNEL);
+		}
+	}
+}
+
+/* Locking wrapper around __try_start_service(). */
+static void try_start_service(struct vs_service_device *service)
+{
+	mutex_lock_nested(&service->ready_lock, service->lock_subclass);
+
+	__try_start_service(service);
+
+	mutex_unlock(&service->ready_lock);
+}
+
+/*
+ * service_ready_work - delayed work that completes a service reset
+ *
+ * Moves a service from RESET to READY (the scheduling delay implements
+ * reset throttling), notifies userspace, and then tries to start the
+ * service if a driver is attached.
+ */
+static void service_ready_work(struct work_struct *work)
+{
+	struct vs_service_device *service = container_of(work,
+			struct vs_service_device, ready_work.work);
+	struct vs_session_device *session = vs_service_get_session(service);
+
+	vs_dev_debug(VS_DEBUG_SESSION, session, &service->dev,
+			"ready work - last reset request was %u ms ago\n",
+			msecs_ago(service->last_reset_request));
+
+	/*
+	 * Make sure there's no reset work pending from an earlier driver
+	 * failure. We should already be inactive at this point, so it's safe
+	 * to just cancel it.
+	 */
+	cancel_work_sync(&service->reset_work);
+
+	mutex_lock_nested(&service->ready_lock, service->lock_subclass);
+
+	if (service->readiness != VS_SERVICE_RESET) {
+		vs_dev_debug(VS_DEBUG_SESSION, session, &service->dev,
+				"ready work found readiness of %d, doing nothing\n",
+				service->readiness);
+		mutex_unlock(&service->ready_lock);
+		return;
+	}
+
+	service->readiness = VS_SERVICE_READY;
+	/* Record the time at which this happened, for throttling. */
+	service->last_ready = jiffies;
+
+	/* Tell userspace that the service is ready. */
+	kobject_uevent(&service->dev.kobj, KOBJ_ONLINE);
+
+	/* Start the service, if it has a driver attached. */
+	__try_start_service(service);
+
+	mutex_unlock(&service->ready_lock);
+}
+
+static int __enable_service(struct vs_service_device *service);
+
+/**
+ * __reset_service - make a service inactive, and tell its driver, the
+ * transport, and possibly the remote partner
+ * @service:       The service to reset
+ * @notify_remote: If true, the partner is notified of the reset
+ *
+ * This routine is called to make an active service inactive. If the given
+ * service is currently active, it drops any queued messages for the service,
+ * and then informs the service driver and the transport layer that the
+ * service has reset. It sets the service readiness to VS_SERVICE_LOCAL_RESET
+ * to indicate that the driver is no longer active.
+ *
+ * This routine has no effect on services that are not active.
+ *
+ * The caller must hold the target service's ready lock.
+ */
+static void __reset_service(struct vs_service_device *service,
+		bool notify_remote)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+	struct vs_session_driver *session_drv =
+			to_vs_session_driver(session->dev.driver);
+	struct vs_service_driver *driver = NULL;
+	struct vs_transport *transport;
+	int err;
+
+	lockdep_assert_held(&service->ready_lock);
+
+	/* If we're already inactive, there's nothing to do. */
+	if (service->readiness != VS_SERVICE_ACTIVE)
+		return;
+
+	service->last_reset = jiffies;
+	service->readiness = VS_SERVICE_LOCAL_RESET;
+
+	cancel_pending_rx(service);
+
+	if (!WARN_ON(!service->driver_probed))
+		driver = to_vs_service_driver(service->dev.driver);
+
+	if (driver && driver->reset)
+		driver->reset(service);
+
+	wake_up_all(&service->quota_wq);
+
+	transport = vs_service_get_session(service)->transport;
+
+	/*
+	 * Ask the transport to reset the service. If this returns a positive
+	 * value, we need to leave the service disabled, and the transport
+	 * will re-enable it. To avoid allowing the disable count to go
+	 * negative if that re-enable races with this callback returning, we
+	 * disable the service beforehand and re-enable it if the callback
+	 * returns zero.
+	 */
+	service->disable_count++;
+	err = transport->vt->service_reset(transport, service);
+	if (err < 0) {
+		dev_err(&session->dev, "Failed to reset service %d: %d (transport)\n",
+				service->id, err);
+		session_fatal_error(session, GFP_KERNEL);
+	} else if (!err) {
+		err = __enable_service(service);
+	}
+
+	if (notify_remote) {
+		if (service->id) {
+			err = session_drv->service_local_reset(session,
+					service);
+			if (err == VS_SERVICE_ALREADY_RESET) {
+				service->readiness = VS_SERVICE_RESET;
+				service->last_reset = jiffies;
+				queue_ready_work(service);
+
+			} else if (err < 0) {
+				dev_err(&session->dev, "Failed to reset service %d: %d (session)\n",
+						service->id, err);
+				session_fatal_error(session, GFP_KERNEL);
+			}
+		} else {
+			session->transport->vt->reset(session->transport);
+		}
+	}
+
+	/* Tell userspace that the service is no longer active. */
+	kobject_uevent(&service->dev.kobj, KOBJ_OFFLINE);
+}
+
+/**
+ * reset_service - reset a service and inform the remote partner
+ * @service: The service to reset
+ *
+ * This routine is called when a reset is locally initiated (other than
+ * implicitly by a session / core service reset). It bumps the reset request
+ * timestamp, acquires the necessary locks, and calls __reset_service.
+ *
+ * This routine returns with the service ready lock held, to allow the caller
+ * to make any other state changes that must be atomic with the service
+ * reset.
+ */
+static void reset_service(struct vs_service_device *service)
+	__acquires(service->ready_lock)
+{
+	service->last_reset_request = jiffies;
+
+	mutex_lock_nested(&service->ready_lock, service->lock_subclass);
+
+	/* ready_lock is deliberately left held; see function comment. */
+	__reset_service(service, true);
+}
+
+/**
+ * vs_service_reset - initiate a service reset
+ * @service: the service that is to be reset
+ * @caller: the service that is initiating the reset
+ *
+ * This routine informs the partner that the given service is being reset,
+ * then disables and flushes the service's receive queues and resets its
+ * driver. The service will be automatically re-enabled once the partner has
+ * acknowledged the reset (see vs_session_handle_service_reset, above).
+ *
+ * If the given service is the core service, this will perform a transport
+ * reset, which implicitly resets (on the server side) or destroys (on
+ * the client side) every other service on the session.
+ *
+ * If the given service is already being reset, this has no effect, other
+ * than to delay completion of the reset if it is being throttled.
+ *
+ * For lock safety reasons, a service can only be directly reset by itself,
+ * the core service, or the service that created it (which is typically also
+ * the core service).
+ *
+ * A service that wishes to reset itself must not do so while holding its state
+ * lock or while running on its own workqueue. In these circumstances, call
+ * vs_service_reset_nosync() instead. Note that returning an error code
+ * (any negative number) from a driver callback forces a call to
+ * vs_service_reset_nosync() and prints an error message.
+ *
+ * Returns 0 on success, or -EPERM if @caller is not permitted to reset
+ * @service.
+ */
+int vs_service_reset(struct vs_service_device *service,
+		struct vs_service_device *caller)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+
+	if (caller != service && caller != service->owner) {
+		struct vs_service_device *core_service = session->core_service;
+
+		WARN_ON(!core_service);
+		if (caller != core_service)
+			return -EPERM;
+	}
+
+	reset_service(service);
+	/* reset_service returns with ready_lock held, but we don't need it */
+	mutex_unlock(&service->ready_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vs_service_reset);
+
+/**
+ * vs_service_reset_nosync - asynchronously reset a service.
+ * @service: the service that is to be reset
+ *
+ * This routine triggers a reset for the nominated service. It may be called
+ * from any context, including interrupt context. It does not wait for the
+ * reset to occur, and provides no synchronisation guarantees when called from
+ * outside the target service.
+ *
+ * This is intended only for service drivers that need to reset themselves
+ * from a context that would not normally allow it. In other cases, use
+ * vs_service_reset.
+ */
+void vs_service_reset_nosync(struct vs_service_device *service)
+{
+	/* NOTE(review): pending_reset is presumably consumed by the
+	 * reset_work handler to coalesce requests — confirm there. */
+	service->pending_reset = true;
+	schedule_work(&service->reset_work);
+}
+EXPORT_SYMBOL_GPL(vs_service_reset_nosync);
+
+/* Remove the sysfs symlinks between the session and the service. */
+static void
+vs_service_remove_sysfs_entries(struct vs_session_device *session,
+		struct vs_service_device *service)
+{
+	sysfs_remove_link(session->sysfs_entry, service->sysfs_name);
+	sysfs_remove_link(&service->dev.kobj, VS_SESSION_SYMLINK_NAME);
+}
+
+/* Free the service's id in the owning session's IDR. */
+static void vs_session_release_service_id(struct vs_service_device *service)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+
+	mutex_lock(&session->service_idr_lock);
+	idr_remove(&session->service_idr, service->id);
+	mutex_unlock(&session->service_idr_lock);
+	vs_dev_debug(VS_DEBUG_SESSION, session, &service->dev,
+			"service id deallocated\n");
+}
+
+/*
+ * Final teardown of a service that has reached VS_SERVICE_DELETED.
+ * Notifies the transport (and optionally the remote end), releases the
+ * service id on the server side, and drops the reference taken by
+ * device_initialize() in vs_service_register().
+ *
+ * Called with the service's ready_lock held.
+ */
+static void destroy_service(struct vs_service_device *service,
+		bool notify_remote)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+	struct vs_session_driver *session_drv =
+			to_vs_session_driver(session->dev.driver);
+	struct vs_service_device *core_service __maybe_unused =
+			session->core_service;
+	int err;
+
+	lockdep_assert_held(&service->ready_lock);
+	WARN_ON(service->readiness != VS_SERVICE_DELETED);
+
+	/* Notify the core service and transport that the service is gone */
+	session->transport->vt->service_remove(session->transport, service);
+	if (notify_remote && service->id && session_drv->service_removed) {
+		err = session_drv->service_removed(session, service);
+		if (err < 0) {
+			dev_err(&session->dev,
+					"Failed to remove service %d: %d\n",
+					service->id, err);
+			session_fatal_error(session, GFP_KERNEL);
+		}
+	}
+
+	/*
+	 * At this point the service is guaranteed to be gone on the client
+	 * side, so we can safely release the service ID.
+	 */
+	if (session->is_server)
+		vs_session_release_service_id(service);
+
+	/*
+	 * This guarantees that any concurrent vs_session_get_service() that
+	 * found the service before we removed it from the IDR will take a
+	 * reference before we release ours.
+	 *
+	 * This similarly protects for_each_[usable_]service().
+	 */
+	synchronize_rcu();
+
+	/* Matches device_initialize() in vs_service_register() */
+	put_device(&service->dev);
+}
+
+/**
+ * disable_service - prevent a service becoming ready
+ * @service: the service that is to be disabled
+ * @force: true if the service is known to be in reset
+ *
+ * This routine may be called for any inactive service. Once disabled, the
+ * service cannot be made ready by the session, and thus cannot become active,
+ * until vs_service_enable() is called for it. If multiple calls are made to
+ * this function, they must be balanced by vs_service_enable() calls.
+ *
+ * If the force option is true, then any pending unacknowledged reset will be
+ * presumed to have been acknowledged. This is used when the core service is
+ * entering reset.
+ *
+ * This is used by the core service client to prevent the service restarting
+ * until the server is ready (i.e., a server_ready message is received); by
+ * the session layer to stop all communication while the core service itself
+ * is in reset; and by the transport layer when the transport was unable to
+ * complete reset of a service in its reset callback (typically because
+ * a service had passed message buffers to another Linux subsystem and could
+ * not free them immediately).
+ *
+ * In any case, there is no need for the operation to be signalled in any
+ * way, because the service is already in reset. It simply delays future
+ * signalling of service readiness.
+ */
+static void disable_service(struct vs_service_device *service, bool force)
+{
+	lockdep_assert_held(&service->ready_lock);
+
+	switch(service->readiness) {
+	case VS_SERVICE_INIT:
+	case VS_SERVICE_DELETED:
+	case VS_SERVICE_LOCAL_DELETE:
+		dev_err(&service->dev, "disabled while uninitialised\n");
+		break;
+	case VS_SERVICE_ACTIVE:
+		dev_err(&service->dev, "disabled while active\n");
+		break;
+	case VS_SERVICE_LOCAL_RESET:
+		/*
+		 * Will go to DISABLED state when reset completes, unless
+		 * it's being forced (i.e. we're moving to a core protocol
+		 * state that implies everything else is reset).
+		 */
+		if (force)
+			service->readiness = VS_SERVICE_DISABLED;
+		service->disable_count++;
+		break;
+	default:
+		service->readiness = VS_SERVICE_DISABLED;
+		service->disable_count++;
+		break;
+	}
+
+	/* A disabled service cannot become ready, so drop any queued
+	 * ready work. */
+	cancel_delayed_work(&service->ready_work);
+}
+
+/*
+ * Handle a remote reset (or reset-and-disable) request for @target.
+ * Takes the target's ready_lock and transitions the service according
+ * to its current readiness state; returns -EPROTO for states in which
+ * a remote reset is not expected.
+ */
+static int service_handle_reset(struct vs_session_device *session,
+		struct vs_service_device *target, bool disable)
+{
+	struct vs_session_driver *session_drv =
+			to_vs_session_driver(session->dev.driver);
+	int err = 0;
+
+	mutex_lock_nested(&target->ready_lock, target->lock_subclass);
+
+	switch (target->readiness) {
+	case VS_SERVICE_LOCAL_DELETE:
+		target->readiness = VS_SERVICE_DELETED;
+		destroy_service(target, true);
+		break;
+	case VS_SERVICE_ACTIVE:
+		/*
+		 * Reset the service and send a reset notification.
+		 *
+		 * We only send notifications for non-core services. This is
+		 * because core notifies by sending a transport reset, which
+		 * is what brought us here in the first place. Note that we
+		 * must already hold the core service state lock iff the
+		 * target is non-core.
+		 */
+		target->last_reset_request = jiffies;
+		__reset_service(target, target->id != 0);
+		/* fall through */
+	case VS_SERVICE_LOCAL_RESET:
+		target->readiness = target->disable_count ?
+			VS_SERVICE_DISABLED : VS_SERVICE_RESET;
+		if (disable)
+			disable_service(target, false);
+		if (target->readiness != VS_SERVICE_DISABLED)
+			queue_ready_work(target);
+		break;
+	case VS_SERVICE_READY:
+		/* Tell userspace that the service is no longer ready. */
+		kobject_uevent(&target->dev.kobj, KOBJ_OFFLINE);
+		/* fall through */
+	case VS_SERVICE_RESET:
+		/*
+		 * This can happen for a non-core service if we get a reset
+		 * request from the server on the client side, after the
+		 * client has enabled the service but before it is active.
+		 * Note that the service is already active on the server side
+		 * at this point. The client's delay may be due to either
+		 * reset throttling or the absence of a driver.
+		 *
+		 * We bump the reset request timestamp, disable the service
+		 * again, and send back an acknowledgement.
+		 */
+		if (disable && target->id) {
+			target->last_reset_request = jiffies;
+
+			err = session_drv->service_local_reset(
+					session, target);
+			if (err < 0) {
+				dev_err(&session->dev,
+						"Failed to reset service %d; %d\n",
+						target->id, err);
+				session_fatal_error(session,
+						GFP_KERNEL);
+			}
+
+			disable_service(target, false);
+			break;
+		}
+		/* fall through */
+	case VS_SERVICE_DISABLED:
+		/*
+		 * This can happen for the core service if we get a reset
+		 * before the transport has activated, or before the core
+		 * service has become ready.
+		 *
+		 * We bump the reset request timestamp, and disable the
+		 * service again if the transport had already activated and
+		 * enabled it.
+		 */
+		if (disable && !target->id) {
+			target->last_reset_request = jiffies;
+
+			if (target->readiness != VS_SERVICE_DISABLED)
+				disable_service(target, false);
+
+			break;
+		}
+		/* fall through */
+	default:
+		dev_warn(&target->dev, "remote reset while inactive (%d)\n",
+				target->readiness);
+		err = -EPROTO;
+		break;
+	}
+
+	mutex_unlock(&target->ready_lock);
+	return err;
+}
+
+/**
+ * vs_service_handle_reset - handle an incoming notification of a reset
+ * @session: the session that owns the service
+ * @service_id: the ID of the service that is to be reset
+ * @disable: if true, the service will not be automatically re-enabled
+ *
+ * This routine is called by the core service when the remote end notifies us
+ * of a non-core service reset. The service must be in ACTIVE, LOCAL_RESET or
+ * LOCAL_DELETE state. It must be called with the core service's state lock
+ * held.
+ *
+ * If the service was in ACTIVE state, the core service is called back to send
+ * a notification to the other end. If it was in LOCAL_DELETE state, it is
+ * unregistered.
+ */
+int vs_service_handle_reset(struct vs_session_device *session,
+		vs_service_id_t service_id, bool disable)
+{
+	struct vs_service_device *target;
+	int ret;
+
+	/* Service id 0 is the core service, which is not reset via this path */
+	if (!service_id)
+		return -EINVAL;
+
+	target = vs_session_get_service(session, service_id);
+	if (!target)
+		return -ENODEV;
+
+	ret = service_handle_reset(session, target, disable);
+	vs_put_service(target);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(vs_service_handle_reset);
+
+/*
+ * Decrement the service's disable count; when it reaches zero, move a
+ * DISABLED service back to RESET and queue the ready work. The caller
+ * must hold the service's ready_lock.
+ */
+static int __enable_service(struct vs_service_device *service)
+{
+	if (WARN_ON(!service->disable_count))
+		return -EINVAL;
+
+	if (--service->disable_count > 0)
+		return 0;
+
+	/*
+	 * If the service is still resetting, it can't become ready until the
+	 * reset completes. If it has been deleted, it will never become
+	 * ready. In either case, there's nothing more to do.
+	 */
+	if ((service->readiness == VS_SERVICE_LOCAL_RESET) ||
+			(service->readiness == VS_SERVICE_LOCAL_DELETE) ||
+			(service->readiness == VS_SERVICE_DELETED))
+		return 0;
+
+	if (WARN_ON(service->readiness != VS_SERVICE_DISABLED))
+		return -EINVAL;
+
+	service->readiness = VS_SERVICE_RESET;
+	service->last_reset = jiffies;
+	queue_ready_work(service);
+
+	return 0;
+}
+
+/**
+ * vs_service_enable - allow a service to become ready
+ * @service: the service that is to be enabled
+ *
+ * Calling this routine for a service permits the session layer to make the
+ * service ready. It will do so as soon as any outstanding reset throttling
+ * is complete, and will then start the service once it has a driver attached.
+ *
+ * Services are disabled, requiring a call to this routine to re-enable them:
+ * - when first initialised (after vs_service_start),
+ * - when reset on the client side by vs_service_handle_reset,
+ * - when the transport has delayed completion of a reset, and
+ * - when the server-side core protocol is disconnected or reset by
+ *   vs_session_disable_noncore.
+ */
+int vs_service_enable(struct vs_service_device *service)
+{
+	int ret;
+
+	/* Nested annotation: subclass is the service's rank in the
+	 * ownership hierarchy (see vs_service_register). */
+	mutex_lock_nested(&service->ready_lock, service->lock_subclass);
+
+	ret = __enable_service(service);
+
+	mutex_unlock(&service->ready_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(vs_service_enable);
+
+/*
+ * Service work functions
+ */
+/*
+ * Queue incoming-message processing: on the rx tasklet for services
+ * with atomic receive handlers, otherwise on the service's workqueue.
+ */
+static void queue_rx_work(struct vs_service_device *service)
+{
+	bool rx_atomic;
+
+	rx_atomic = vs_service_has_atomic_rx(service);
+	vs_dev_debug(VS_DEBUG_SESSION, vs_service_get_session(service),
+			&service->dev, "Queuing rx %s\n",
+			rx_atomic ? "tasklet (atomic)" : "work (cansleep)");
+
+	if (rx_atomic)
+		tasklet_schedule(&service->rx_tasklet);
+	else
+		queue_work(service->work_queue, &service->rx_work);
+}
+
+/*
+ * Stop all rx processing and free any queued receive buffers. The
+ * caller must hold the service's ready_lock.
+ */
+static void cancel_pending_rx(struct vs_service_device *service)
+{
+	struct vs_mbuf *mbuf;
+
+	lockdep_assert_held(&service->ready_lock);
+
+	cancel_work_sync(&service->rx_work);
+	tasklet_kill(&service->rx_tasklet);
+
+	/* Drop rx_lock around each free, which may sleep or take other locks */
+	spin_lock_irq(&service->rx_lock);
+	while (!list_empty(&service->rx_queue)) {
+		mbuf = list_first_entry(&service->rx_queue,
+				struct vs_mbuf, queue);
+		list_del_init(&mbuf->queue);
+		spin_unlock_irq(&service->rx_lock);
+		vs_service_free_mbuf(service, mbuf);
+		spin_lock_irq(&service->rx_lock);
+	}
+	service->tx_ready = false;
+	spin_unlock_irq(&service->rx_lock);
+}
+
+static bool reset_throttle_cooled_off(struct vs_service_device *service);
+static unsigned long reset_cool_off(struct vs_service_device *service);
+
+/*
+ * Delayed work that ends reset throttling once the service's cool-off
+ * period has elapsed; reschedules itself if the cool-off deadline has
+ * moved since it was queued.
+ */
+static void service_cooloff_work(struct work_struct *work)
+{
+	struct vs_service_device *service = container_of(work,
+			struct vs_service_device, cooloff_work.work);
+	struct vs_session_device *session = vs_service_get_session(service);
+	unsigned long current_time = jiffies, wake_time;
+
+	mutex_lock_nested(&service->ready_lock, service->lock_subclass);
+
+	if (reset_throttle_cooled_off(service)) {
+		vs_debug(VS_DEBUG_SESSION, session,
+				"Reset thrashing cooled off (delay = %u ms, cool off = %u ms, last reset %u ms ago, last reset request was %u ms ago)\n",
+				jiffies_to_msecs(service->reset_delay),
+				jiffies_to_msecs(reset_cool_off(service)),
+				msecs_ago(service->last_reset),
+				msecs_ago(service->last_reset_request));
+
+		service->reset_delay = 0;
+
+		/*
+		 * If the service is already in reset, then queue_ready_work
+		 * has already run and has deferred queuing of the ready_work
+		 * until cooloff. Schedule the ready work to run immediately.
+		 */
+		if (service->readiness == VS_SERVICE_RESET)
+			schedule_delayed_work(&service->ready_work, 0);
+	} else {
+		/*
+		 * This can happen if last_reset_request has been bumped
+		 * since the cooloff work was first queued. We need to
+		 * work out how long it is until the service cools off,
+		 * then reschedule ourselves.
+		 */
+		wake_time = reset_cool_off(service) +
+				service->last_reset_request;
+
+		WARN_ON(time_after(current_time, wake_time));
+
+		schedule_delayed_work(&service->cooloff_work,
+				wake_time - current_time);
+	}
+
+	mutex_unlock(&service->ready_lock);
+}
+
+/* Work function backing vs_service_reset_nosync(). */
+static void
+service_reset_work(struct work_struct *work)
+{
+	struct vs_service_device *service = container_of(work,
+			struct vs_service_device, reset_work);
+
+	service->pending_reset = false;
+
+	/* The service resets itself, so it is its own caller here */
+	vs_service_reset(service, service);
+}
+
+/* Returns true if there are more messages to handle */
+static bool
+dequeue_and_handle_received_message(struct vs_service_device *service)
+{
+	struct vs_service_driver *driver =
+			to_vs_service_driver(service->dev.driver);
+	struct vs_session_device *session = vs_service_get_session(service);
+	const struct vs_transport_vtable *vt = session->transport->vt;
+	struct vs_service_stats *stats = &service->stats;
+	struct vs_mbuf *mbuf;
+	size_t size;
+	int ret;
+
+	/* Don't do rx work unless the service is active */
+	if (service->readiness != VS_SERVICE_ACTIVE)
+		return false;
+
+	/* Atomically take an item from the queue */
+	spin_lock_irq(&service->rx_lock);
+	if (!list_empty(&service->rx_queue)) {
+		mbuf = list_first_entry(&service->rx_queue, struct vs_mbuf,
+				queue);
+		list_del_init(&mbuf->queue);
+		spin_unlock_irq(&service->rx_lock);
+		/* Record size before handing off; receive() may free the mbuf */
+		size = vt->mbuf_size(mbuf);
+
+		/*
+		 * Call the message handler for the service. The service's
+		 * message handler is responsible for freeing the mbuf when it
+		 * is done with it.
+		 */
+		ret = driver->receive(service, mbuf);
+		if (ret < 0) {
+			atomic_inc(&service->stats.recv_failures);
+			dev_err(&service->dev,
+					"receive returned %d; resetting service\n",
+					ret);
+			vs_service_reset_nosync(service);
+			return false;
+		} else {
+			atomic_add(size, &service->stats.recv_bytes);
+			atomic_inc(&service->stats.recv_mbufs);
+		}
+
+	} else if (service->tx_ready) {
+		service->tx_ready = false;
+		spin_unlock_irq(&service->rx_lock);
+
+		/*
+		 * Update the tx_ready stats accounting and then call the
+		 * service's tx_ready handler.
+		 */
+		atomic_inc(&stats->nr_tx_ready);
+		if (atomic_read(&stats->nr_over_quota) > 0) {
+			int total;
+
+			total = atomic_add_return(jiffies_to_msecs(jiffies -
+							stats->over_quota_time),
+					&stats->over_quota_time_total);
+			atomic_set(&stats->over_quota_time_avg, total /
+					atomic_read(&stats->nr_over_quota));
+		}
+		atomic_set(&service->is_over_quota, 0);
+
+		/*
+		 * Note that a service's quota may reduce at any point, even
+		 * during the tx_ready handler. This is important if a service
+		 * has an ordered list of pending messages to send. If a
+		 * message fails to send from the tx_ready handler due to
+		 * over-quota then subsequent messages in the same handler may
+		 * send successfully. To avoid sending messages in the
+		 * incorrect order the service's tx_ready handler should
+		 * return immediately if a message fails to send.
+		 */
+		ret = driver->tx_ready(service);
+		if (ret < 0) {
+			dev_err(&service->dev,
+					"tx_ready returned %d; resetting service\n",
+					ret);
+			vs_service_reset_nosync(service);
+			return false;
+		}
+	} else {
+		spin_unlock_irq(&service->rx_lock);
+	}
+
+	/*
+	 * There's no need to lock for this list_empty: if we race
+	 * with a msg enqueue, we'll be rescheduled by the other side,
+	 * and if we race with a dequeue, we'll just do nothing when
+	 * we run (or will be cancelled before we run).
+	 */
+	return !list_empty(&service->rx_queue) || service->tx_ready;
+}
+
+/* Tasklet entry point for services with atomic rx handlers. */
+static void service_rx_tasklet(unsigned long data)
+{
+	struct vs_service_device *service = (struct vs_service_device *)data;
+	bool resched;
+
+	/*
+	 * There is no need to acquire the state spinlock or mutex here,
+	 * because this tasklet is disabled when the lock is held. These
+	 * are annotations for sparse and lockdep, respectively.
+	 *
+	 * We can't annotate the implicit mutex acquire because lockdep gets
+	 * upset about inconsistent softirq states.
+	 */
+	__acquire(service);
+	spin_acquire(&service->state_spinlock.dep_map, 0, 0, _THIS_IP_);
+
+	resched = dequeue_and_handle_received_message(service);
+
+	if (resched)
+		tasklet_schedule(&service->rx_tasklet);
+
+	spin_release(&service->state_spinlock.dep_map, 0, _THIS_IP_);
+	__release(service);
+}
+
+/* Workqueue entry point for services with task-context rx handlers. */
+static void service_rx_work(struct work_struct *work)
+{
+	struct vs_service_device *service = container_of(work,
+			struct vs_service_device, rx_work);
+	bool requeue;
+
+	/*
+	 * We must acquire the state mutex here to protect services that
+	 * are using vs_service_state_lock().
+	 *
+	 * There is no need to acquire the spinlock, which is never used in
+	 * drivers with task context receive handlers.
+	 */
+	vs_service_state_lock(service);
+
+	requeue = dequeue_and_handle_received_message(service);
+
+	vs_service_state_unlock(service);
+
+	if (requeue)
+		queue_work(service->work_queue, work);
+}
+
+/*
+ * Service sysfs statistics counters. These files are all atomic_t, and
+ * read only, so we use a generator macro to avoid code duplication.
+ */
+#define service_stat_attr(__name)					\
+	static ssize_t service_stat_##__name##_show(struct device *dev, \
+			struct device_attribute *attr, char *buf)       \
+	{                                                               \
+		struct vs_service_device *service =                     \
+				to_vs_service_device(dev);              \
+									\
+		return scnprintf(buf, PAGE_SIZE, "%u\n",		\
+				atomic_read(&service->stats.__name));	\
+	}                                                               \
+	static DEVICE_ATTR(__name, S_IRUGO,                             \
+			service_stat_##__name##_show, NULL);
+
+service_stat_attr(sent_mbufs);
+service_stat_attr(sent_bytes);
+service_stat_attr(recv_mbufs);
+service_stat_attr(recv_bytes);
+service_stat_attr(nr_over_quota);
+service_stat_attr(nr_tx_ready);
+service_stat_attr(over_quota_time_total);
+service_stat_attr(over_quota_time_avg);
+
+/* All of the above counters appear read-only under <service>/stats/ */
+static struct attribute *service_stat_dev_attrs[] = {
+	&dev_attr_sent_mbufs.attr,
+	&dev_attr_sent_bytes.attr,
+	&dev_attr_recv_mbufs.attr,
+	&dev_attr_recv_bytes.attr,
+	&dev_attr_nr_over_quota.attr,
+	&dev_attr_nr_tx_ready.attr,
+	&dev_attr_over_quota_time_total.attr,
+	&dev_attr_over_quota_time_avg.attr,
+	NULL,
+};
+
+static const struct attribute_group service_stat_attributes = {
+	.name   = "stats",
+	.attrs  = service_stat_dev_attrs,
+};
+
+/*
+ * Deactivate @service, notify the remote end if needed, and either
+ * destroy it immediately or mark it LOCAL_DELETE to await the remote
+ * reset acknowledgement. Also tears down the service's sysfs entries
+ * and unregisters the device.
+ */
+static void delete_service(struct vs_service_device *service)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+	bool notify_on_destroy = true;
+
+	/* FIXME: Jira ticket SDK-3495 - philipd. */
+	/* This should be the caller's responsibility */
+	vs_get_service(service);
+
+	mutex_lock_nested(&service->ready_lock, service->lock_subclass);
+
+	/*
+	 * If we're on the client side, the service should already have been
+	 * disabled at this point.
+	 */
+	WARN_ON(service->id != 0 && !session->is_server &&
+			service->readiness != VS_SERVICE_DISABLED &&
+			service->readiness != VS_SERVICE_DELETED);
+
+	/*
+	 * Make sure the service is not active, and notify the remote end if
+	 * it needs to be reset. Note that we already hold the core service
+	 * state lock iff this is a non-core service.
+	 */
+	__reset_service(service, true);
+
+	/*
+	 * If the remote end is aware that the service is inactive, we can
+	 * delete right away; otherwise we need to wait for a notification
+	 * that the service has reset.
+	 */
+	switch (service->readiness) {
+	case VS_SERVICE_LOCAL_DELETE:
+	case VS_SERVICE_DELETED:
+		/* Nothing to do here */
+		mutex_unlock(&service->ready_lock);
+		vs_put_service(service);
+		return;
+	case VS_SERVICE_ACTIVE:
+		BUG();
+		break;
+	case VS_SERVICE_LOCAL_RESET:
+		service->readiness = VS_SERVICE_LOCAL_DELETE;
+		break;
+	case VS_SERVICE_INIT:
+		notify_on_destroy = false;
+		/* Fall through */
+	default:
+		service->readiness = VS_SERVICE_DELETED;
+		destroy_service(service, notify_on_destroy);
+		break;
+	}
+
+	mutex_unlock(&service->ready_lock);
+
+	/*
+	 * Remove service symlink from
+	 * sys/vservices/(<server>/<client>)-sessions/ directory
+	 */
+	vs_service_remove_sysfs_entries(session, service);
+
+	sysfs_remove_group(&service->dev.kobj, &service_stat_attributes);
+
+	/*
+	 * On the client-side we need to release the service id as soon as
+	 * the service is deleted. Otherwise the server may attempt to create
+	 * a new service with this id.
+	 */
+	if (!session->is_server)
+		vs_session_release_service_id(service);
+
+	device_del(&service->dev);
+	vs_put_service(service);
+}
+
+/**
+ * vs_service_delete - deactivate and start removing a service device
+ * @service: the service to delete
+ * @caller: the service initiating deletion
+ *
+ * Services may only be deleted by their owner (on the server side), or by the
+ * core service. This function must not be called for the core service.
+ */
+int vs_service_delete(struct vs_service_device *service,
+		struct vs_service_device *caller)
+{
+	struct vs_session_device *session =
+			vs_service_get_session(service);
+	struct vs_service_device *core_service = session->core_service;
+
+	if (WARN_ON(!core_service))
+		return -ENODEV;
+
+	/* The core service (id 0) must not be deleted via this path */
+	if (!service->id)
+		return -EINVAL;
+
+	/* Only the owner or the core service may delete a service */
+	if (caller != service->owner && caller != core_service)
+		return -EPERM;
+
+	delete_service(service);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vs_service_delete);
+
+/**
+ * vs_service_handle_delete - deactivate and start removing a service device
+ * @service: the service to delete
+ *
+ * This is a variant of vs_service_delete which must only be called by the
+ * core service. It is used by the core service client when a service_removed
+ * message is received.
+ */
+int vs_service_handle_delete(struct vs_service_device *service)
+{
+	struct vs_session_device *session __maybe_unused =
+			vs_service_get_session(service);
+	struct vs_service_device *core_service __maybe_unused =
+			session->core_service;
+
+	/* Callers must hold the core service state lock (see kerneldoc) */
+	lockdep_assert_held(&core_service->state_mutex);
+
+	delete_service(service);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vs_service_handle_delete);
+
+/*
+ * Deferred release of all service resources; scheduled from
+ * vs_service_release() once the device refcount hits zero.
+ */
+static void service_cleanup_work(struct work_struct *work)
+{
+	struct vs_service_device *service = container_of(work,
+			struct vs_service_device, cleanup_work);
+	struct vs_session_device *session = vs_service_get_session(service);
+
+	vs_dev_debug(VS_DEBUG_SESSION, session, &service->dev, "cleanup\n");
+
+	if (service->owner)
+		vs_put_service(service->owner);
+
+	/* Put our reference to the session */
+	if (service->dev.parent)
+		put_device(service->dev.parent);
+
+	/* Make sure no work items run (or are queued) after we free */
+	tasklet_kill(&service->rx_tasklet);
+	cancel_work_sync(&service->rx_work);
+	cancel_delayed_work_sync(&service->cooloff_work);
+	cancel_delayed_work_sync(&service->ready_work);
+	cancel_work_sync(&service->reset_work);
+
+	if (service->work_queue)
+		destroy_workqueue(service->work_queue);
+
+	kfree(service->sysfs_name);
+	kfree(service->name);
+	kfree(service->protocol);
+	kfree(service);
+}
+
+/* struct device release callback for service devices. */
+static void vs_service_release(struct device *dev)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	vs_dev_debug(VS_DEBUG_SESSION, vs_service_get_session(service),
+			&service->dev, "release\n");
+
+	/*
+	 * We need to defer cleanup to avoid a circular dependency between the
+	 * core service's state lock (which can be held at this point, on the
+	 * client side) and any non-core service's reset work (which we must
+	 * cancel here, and which acquires the core service state lock).
+	 */
+	schedule_work(&service->cleanup_work);
+}
+
+/*
+ * Allocate @service_id (or, for VS_SERVICE_AUTO_ALLOCATE_ID, any free
+ * id >= 1) in the session's IDR and store the result in service->id.
+ * Returns -EBUSY if a specifically requested id is already taken.
+ */
+static int service_add_idr(struct vs_session_device *session,
+		struct vs_service_device *service, vs_service_id_t service_id)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)
+	int err, base_id, id;
+
+	if (service_id == VS_SERVICE_AUTO_ALLOCATE_ID)
+		base_id = 1;
+	else
+		base_id = service_id;
+
+retry:
+	if (!idr_pre_get(&session->service_idr, GFP_KERNEL))
+		return -ENOMEM;
+
+	mutex_lock(&session->service_idr_lock);
+	err = idr_get_new_above(&session->service_idr, service, base_id, &id);
+	if (err == 0) {
+		if (service_id != VS_SERVICE_AUTO_ALLOCATE_ID &&
+				id != service_id) {
+			/* Failed to allocate the requested service id */
+			idr_remove(&session->service_idr, id);
+			mutex_unlock(&session->service_idr_lock);
+			return -EBUSY;
+		}
+		if (id > VS_MAX_SERVICE_ID) {
+			/* We are out of service ids */
+			idr_remove(&session->service_idr, id);
+			mutex_unlock(&session->service_idr_lock);
+			return -ENOSPC;
+		}
+	}
+	mutex_unlock(&session->service_idr_lock);
+	if (err == -EAGAIN)
+		goto retry;
+	if (err < 0)
+		return err;
+#else
+	int start, end, id;
+
+	if (service_id == VS_SERVICE_AUTO_ALLOCATE_ID) {
+		start = 1;
+		end = VS_MAX_SERVICES;
+	} else {
+		start = service_id;
+		end = service_id + 1;
+	}
+
+	mutex_lock(&session->service_idr_lock);
+	id = idr_alloc(&session->service_idr, service, start, end,
+			GFP_KERNEL);
+	mutex_unlock(&session->service_idr_lock);
+
+	if (id == -ENOSPC)
+		return -EBUSY;
+	else if (id < 0)
+		return id;
+#endif
+
+	service->id = id;
+	return 0;
+}
+
+/*
+ * Create the session<->service sysfs symlinks. On success the
+ * generated "<name>:<id>" link name is stored in service->sysfs_name
+ * (freed in service_cleanup_work).
+ */
+static int
+vs_service_create_sysfs_entries(struct vs_session_device *session,
+		struct vs_service_device *service, vs_service_id_t id)
+{
+	int ret;
+	char *sysfs_name, *c;
+
+	/* Add a symlink to session device inside service device sysfs */
+	ret = sysfs_create_link(&service->dev.kobj, &session->dev.kobj,
+			VS_SESSION_SYMLINK_NAME);
+	if (ret) {
+		dev_err(&service->dev, "Error %d creating session symlink\n",
+				ret);
+		goto fail;
+	}
+
+	/* Get the length of the string for sysfs dir */
+	sysfs_name = kasprintf(GFP_KERNEL, "%s:%d", service->name, id);
+	if (!sysfs_name) {
+		ret = -ENOMEM;
+		goto fail_session_link;
+	}
+
+	/*
+	 * We don't want to create symlinks with /'s which could get interpreted
+	 * as another directory so replace all /'s with !'s
+	 */
+	while ((c = strchr(sysfs_name, '/')))
+		*c = '!';
+	ret = sysfs_create_link(session->sysfs_entry, &service->dev.kobj,
+			sysfs_name);
+	if (ret)
+		goto fail_free_sysfs_name;
+
+	service->sysfs_name = sysfs_name;
+
+	return 0;
+
+fail_free_sysfs_name:
+	kfree(sysfs_name);
+fail_session_link:
+	sysfs_remove_link(&service->dev.kobj, VS_SESSION_SYMLINK_NAME);
+fail:
+	return ret;
+}
+
+/**
+ * vs_service_register - create and register a new vs_service_device
+ * @session: the session device that is the parent of the service
+ * @owner: the service responsible for managing the new service
+ * @service_id: the ID of the new service
+ * @name: the name of the new service
+ * @protocol: the protocol for the new service
+ * @plat_data: value to be assigned to (struct device *)->platform_data
+ *
+ * This function should only be called by a session driver that is bound to
+ * the given session.
+ *
+ * The given service_id must not have been passed to a prior successful
+ * vs_service_register call, unless the service ID has since been freed by a
+ * call to the session driver's service_removed callback.
+ *
+ * The core service state lock must not be held while calling this function.
+ */
+struct vs_service_device *vs_service_register(struct vs_session_device *session,
+		struct vs_service_device *owner, vs_service_id_t service_id,
+		const char *protocol, const char *name, const void *plat_data)
+{
+	struct vs_service_device *service;
+	struct vs_session_driver *session_drv;
+	int ret = -EIO;
+	char *c;
+
+	if (service_id && !owner) {
+		dev_err(&session->dev, "Non-core service must have an owner\n");
+		ret = -EINVAL;
+		goto fail;
+	} else if (!service_id && owner) {
+		dev_err(&session->dev, "Core service must not have an owner\n");
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	if (!session->dev.driver)
+		goto fail;
+
+	session_drv = to_vs_session_driver(session->dev.driver);
+
+	service = kzalloc(sizeof(*service), GFP_KERNEL);
+	if (!service) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	INIT_LIST_HEAD(&service->rx_queue);
+	INIT_WORK(&service->rx_work, service_rx_work);
+	INIT_WORK(&service->reset_work, service_reset_work);
+	INIT_DELAYED_WORK(&service->ready_work, service_ready_work);
+	INIT_DELAYED_WORK(&service->cooloff_work, service_cooloff_work);
+	INIT_WORK(&service->cleanup_work, service_cleanup_work);
+	spin_lock_init(&service->rx_lock);
+	init_waitqueue_head(&service->quota_wq);
+
+	service->owner = vs_get_service(owner);
+
+	service->readiness = VS_SERVICE_INIT;
+	mutex_init(&service->ready_lock);
+	service->driver_probed = false;
+
+	/*
+	 * Service state locks - A service is only allowed to use one of these
+	 */
+	spin_lock_init(&service->state_spinlock);
+	mutex_init(&service->state_mutex);
+#ifdef CONFIG_VSERVICES_LOCK_DEBUG
+	service->state_spinlock_used = false;
+	service->state_mutex_used = false;
+#endif
+
+	/* Lock ordering
+	 *
+	 * The dependency order for the various service locks is as follows:
+	 *
+	 * cooloff_work
+	 * reset_work
+	 * ready_work
+	 * ready_lock/0
+	 * rx_work/0
+	 * state_mutex/0
+	 * ready_lock/1
+	 * ...
+	 * state_mutex/n
+	 * state_spinlock
+	 *
+	 * The subclass is the service's rank in the hierarchy of
+	 * service ownership. This results in core having subclass 0 on
+	 * server-side and 1 on client-side. Services directly created
+	 * by the core will have a lock subclass value of 2 for
+	 * servers, 3 for clients. Services created by non-core
+	 * services will have a lock subclass value of x + 1, where x
+	 * is the lock subclass of the creator service. (e.g servers
+	 * will have even numbered lock subclasses, clients will have
+	 * odd numbered lock subclasses).
+	 *
+	 * If a service driver has any additional locks for protecting
+	 * internal state, they will generally fit between state_mutex/n and
+	 * ready_lock/n+1 on this list. For the core service, this applies to
+	 * the session lock.
+	 */
+
+	if (owner)
+		service->lock_subclass = owner->lock_subclass + 2;
+	else
+		service->lock_subclass = session->is_server ? 0 : 1;
+
+#ifdef CONFIG_LOCKDEP
+	if (service->lock_subclass >= MAX_LOCKDEP_SUBCLASSES) {
+		dev_warn(&session->dev, "Owner hierarchy is too deep, lockdep will fail\n");
+	} else {
+		/*
+		 * We need to set the default subclass for the rx work,
+		 * because the workqueue API doesn't (and can't) provide
+		 * anything like lock_nested() for it.
+		 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)
+		/*
+		 * Lockdep allows a specific lock's subclass to be set with
+		 * the subclass argument to lockdep_init_map(). However, prior
+		 * to Linux 3.3, that only works the first time it is called
+		 * for a given class and subclass. So we have to fake it,
+		 * putting every subclass in a different class, so the only
+		 * thing that breaks is printing the subclass in lockdep
+		 * warnings.
+		 */
+		static struct lock_class_key
+				rx_work_keys[MAX_LOCKDEP_SUBCLASSES];
+		struct lock_class_key *key =
+				&rx_work_keys[service->lock_subclass];
+#else
+		struct lock_class_key *key = service->rx_work.lockdep_map.key;
+#endif
+
+		/*
+		 * We can't use the lockdep_set_class() macro because the
+		 * work's lockdep map is called .lockdep_map instead of
+		 * .dep_map.
+		 */
+		lockdep_init_map(&service->rx_work.lockdep_map,
+				"&service->rx_work", key,
+				service->lock_subclass);
+	}
+#endif
+
+	/*
+	 * Copy the protocol and name. Remove any leading or trailing
+	 * whitespace characters (including newlines) since the strings
+	 * may have been passed via sysfs files.
+	 */
+	if (protocol) {
+		service->protocol = kstrdup(protocol, GFP_KERNEL);
+		if (!service->protocol) {
+			ret = -ENOMEM;
+			goto fail_copy_protocol;
+		}
+		c = strim(service->protocol);
+		if (c != service->protocol)
+			memmove(service->protocol, c,
+					strlen(service->protocol) + 1);
+	}
+
+	service->name = kstrdup(name, GFP_KERNEL);
+	if (!service->name) {
+		ret = -ENOMEM;
+		goto fail_copy_name;
+	}
+	c = strim(service->name);
+	if (c != service->name)
+		memmove(service->name, c, strlen(service->name) + 1);
+
+	service->is_server = session_drv->is_server;
+
+	/* Grab a reference to the session we are on */
+	service->dev.parent = get_device(&session->dev);
+	service->dev.bus = session_drv->service_bus;
+	service->dev.release = vs_service_release;
+
+	service->last_reset = 0;
+	service->last_reset_request = 0;
+	service->last_ready = 0;
+	service->reset_delay = 0;
+
+	device_initialize(&service->dev);
+	service->dev.platform_data = (void *)plat_data;
+
+	ret = service_add_idr(session, service, service_id);
+	if (ret)
+		goto fail_add_idr;
+
+#ifdef CONFIG_VSERVICES_NAMED_DEVICE
+	/* Integrate session and service names in vservice devnodes */
+	dev_set_name(&service->dev, "vservice-%s:%s:%s:%d:%d",
+			session->is_server ? "server" : "client",
+			session->name, service->name,
+			session->session_num, service->id);
+#else
+	dev_set_name(&service->dev, "%s:%d", dev_name(&session->dev),
+			service->id);
+#endif
+
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+	if (service->id > 0)
+		service->dev.devt = MKDEV(vservices_cdev_major,
+			(session->session_num * VS_MAX_SERVICES) +
+			service->id);
+#endif
+
+	service->work_queue = vs_create_workqueue(dev_name(&service->dev));
+	if (!service->work_queue) {
+		ret = -ENOMEM;
+		goto fail_create_workqueue;
+	}
+
+	tasklet_init(&service->rx_tasklet, service_rx_tasklet,
+			(unsigned long)service);
+
+	/*
+	 * If this is the core service, set the core service pointer in the
+	 * session.
+	 */
+	if (service->id == 0) {
+		mutex_lock(&session->service_idr_lock);
+		if (session->core_service) {
+			ret = -EEXIST;
+			mutex_unlock(&session->service_idr_lock);
+			goto fail_become_core;
+		}
+
+		/* Put in vs_session_bus_remove() */
+		session->core_service = vs_get_service(service);
+		mutex_unlock(&session->service_idr_lock);
+	}
+
+	/* Notify the transport */
+	ret = session->transport->vt->service_add(session->transport, service);
+	if (ret) {
+		dev_err(&session->dev,
+				"Failed to add service %d (%s:%s) to transport: %d\n",
+				service->id, service->name,
+				service->protocol, ret);
+		goto fail_transport_add;
+	}
+
+	/* Delay uevent until vs_service_start(). */
+	dev_set_uevent_suppress(&service->dev, true);
+
+	ret = device_add(&service->dev);
+	if (ret)
+		goto fail_device_add;
+
+	/* Create the service statistics sysfs group */
+	ret = sysfs_create_group(&service->dev.kobj, &service_stat_attributes);
+	if (ret)
+		goto fail_sysfs_create_group;
+
+	/* Create additional sysfs files */
+	ret = vs_service_create_sysfs_entries(session, service, service->id);
+	if (ret)
+		goto fail_sysfs_add_entries;
+
+	return service;
+
+fail_sysfs_add_entries:
+	sysfs_remove_group(&service->dev.kobj, &service_stat_attributes);
+fail_sysfs_create_group:
+	device_del(&service->dev);
+fail_device_add:
+	session->transport->vt->service_remove(session->transport, service);
+fail_transport_add:
+	if (service->id == 0) {
+		session->core_service = NULL;
+		vs_put_service(service);
+	}
+fail_become_core:
+fail_create_workqueue:
+	vs_session_release_service_id(service);
+fail_add_idr:
+	/*
+	 * device_initialize() has been called, so we must call put_device()
+	 * and let vs_service_release() handle the rest of the cleanup.
+	 */
+	put_device(&service->dev);
+	return ERR_PTR(ret);
+
+fail_copy_name:
+	if (service->protocol)
+		kfree(service->protocol);
+fail_copy_protocol:
+	kfree(service);
+fail:
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(vs_service_register);
+
+/**
+ * vs_session_get_service - Look up a service by ID on a session and get
+ * a reference to it. The caller must call vs_put_service when it is finished
+ * with the service.
+ *
+ * @session: The session to search for the service on
+ * @service_id: ID of the service to find
+ *
+ * Returns the service with its reference count incremented, or NULL if
+ * @session is NULL or no service with that ID exists on the session.
+ */
+struct vs_service_device *
+vs_session_get_service(struct vs_session_device *session,
+		vs_service_id_t service_id)
+{
+	struct vs_service_device *service;
+
+	if (!session)
+		return NULL;
+
+	/*
+	 * Look the service up under the RCU read lock, and take a reference
+	 * before leaving the read-side critical section so the caller can
+	 * keep using the service afterwards. Assumes service structs are
+	 * reclaimed after an RCU grace period - TODO confirm against the
+	 * removal path.
+	 */
+	rcu_read_lock();
+	service = idr_find(&session->service_idr, service_id);
+	if (!service) {
+		rcu_read_unlock();
+		return NULL;
+	}
+	vs_get_service(service);
+	rcu_read_unlock();
+
+	return service;
+}
+EXPORT_SYMBOL_GPL(vs_session_get_service);
+
+/**
+ * __for_each_service - Iterate over all non-core services on a session.
+ *
+ * @session: Session to iterate services on
+ * @func: Callback function for each iterated service
+ *
+ * Iterate over all services on a session, excluding the core service, and
+ * call a callback function on each. A reference is held on the service
+ * across each callback invocation, so the callback can use the service
+ * outside the RCU read-side critical section.
+ */
+static void __for_each_service(struct vs_session_device *session,
+		void (*func)(struct vs_service_device *))
+{
+	struct vs_service_device *service;
+	int id;
+
+	/* Start at ID 1: the core service always has ID 0 and is skipped. */
+	for (id = 1; ; id++) {
+		rcu_read_lock();
+		/* idr_get_next() advances id to the next allocated slot. */
+		service = idr_get_next(&session->service_idr, &id);
+		if (!service) {
+			rcu_read_unlock();
+			break;
+		}
+		vs_get_service(service);
+		rcu_read_unlock();
+
+		func(service);
+		vs_put_service(service);
+	}
+}
+
+/**
+ * vs_session_delete_noncore - immediately delete all non-core services
+ * @session: the session whose services are to be deleted
+ *
+ * This function disables and deletes all non-core services without notifying
+ * the core service. It must only be called by the core service, with its state
+ * lock held. It is used when the core service client disconnects or
+ * resets, and when the core service server has its driver removed.
+ */
+void vs_session_delete_noncore(struct vs_session_device *session)
+{
+	/* __maybe_unused: only referenced when lockdep is enabled. */
+	struct vs_service_device *core_service __maybe_unused =
+			session->core_service;
+
+	lockdep_assert_held(&core_service->state_mutex);
+
+	/* Disable everything first so no service is active while deleting. */
+	vs_session_disable_noncore(session);
+
+	__for_each_service(session, delete_service);
+}
+EXPORT_SYMBOL_GPL(vs_session_delete_noncore);
+
+/**
+ * vs_session_for_each_service - Iterate over all initialised and non-deleted
+ * non-core services on a session.
+ *
+ * @session: Session to iterate services on
+ * @func: Callback function for each iterated service
+ * @data: Extra data to pass to the callback
+ *
+ * Iterate over all services on a session, excluding the core service and any
+ * service that has been deleted or has not yet had vs_service_start() called,
+ * and call a callback function on each. The callback function is called with
+ * the service's ready lock held.
+ */
+void vs_session_for_each_service(struct vs_session_device *session,
+		void (*func)(struct vs_service_device *, void *), void *data)
+{
+	struct vs_service_device *service;
+	int id;
+
+	/* Start at ID 1: the core service always has ID 0 and is skipped. */
+	for (id = 1; ; id++) {
+		rcu_read_lock();
+		service = idr_get_next(&session->service_idr, &id);
+		if (!service) {
+			rcu_read_unlock();
+			break;
+		}
+		vs_get_service(service);
+		rcu_read_unlock();
+
+		/*
+		 * lock_subclass encodes the service's depth in the owner
+		 * hierarchy, keeping lockdep happy when nested services'
+		 * ready locks are taken while iterating.
+		 */
+		mutex_lock_nested(&service->ready_lock, service->lock_subclass);
+
+		/* Skip services that are deleted or not yet started. */
+		if (service->readiness != VS_SERVICE_LOCAL_DELETE &&
+				service->readiness != VS_SERVICE_DELETED &&
+				service->readiness != VS_SERVICE_INIT)
+			func(service, data);
+
+		mutex_unlock(&service->ready_lock);
+		vs_put_service(service);
+	}
+}
+
+/*
+ * Reset (if active) and then force-disable a single service. Used as the
+ * per-service callback for vs_session_disable_noncore(); must be called
+ * with the service's ready lock held.
+ */
+static void force_disable_service(struct vs_service_device *service,
+		void *unused)
+{
+	lockdep_assert_held(&service->ready_lock);
+
+	/* Take the service out of the active state before disabling it. */
+	if (service->readiness == VS_SERVICE_ACTIVE)
+		__reset_service(service, false);
+
+	disable_service(service, true);
+}
+
+/**
+ * vs_session_disable_noncore - immediately disable all non-core services
+ * @session: the session whose services are to be disabled
+ *
+ * This function must be called by the core service driver to disable all
+ * services, whenever it resets or is otherwise disconnected. It is called
+ * directly by the server-side core service, and by the client-side core
+ * service via vs_session_delete_noncore().
+ */
+void vs_session_disable_noncore(struct vs_session_device *session)
+{
+	/* Each service is force-disabled with its ready lock held. */
+	vs_session_for_each_service(session, force_disable_service, NULL);
+}
+EXPORT_SYMBOL_GPL(vs_session_disable_noncore);
+
+/*
+ * Per-service callback for vs_session_enable_noncore(): enable a single
+ * service. Must be called with the service's ready lock held.
+ */
+static void try_enable_service(struct vs_service_device *service, void *unused)
+{
+	lockdep_assert_held(&service->ready_lock);
+
+	__enable_service(service);
+}
+
+/**
+ * vs_session_enable_noncore - enable all disabled non-core services
+ * @session: the session whose services are to be enabled
+ *
+ * This function is called by the core server driver to enable all services
+ * when the core client connects.
+ */
+void vs_session_enable_noncore(struct vs_session_device *session)
+{
+	/* Each service is enabled with its ready lock held. */
+	vs_session_for_each_service(session, try_enable_service, NULL);
+}
+EXPORT_SYMBOL_GPL(vs_session_enable_noncore);
+
+/**
+ * vs_session_handle_message - process an incoming message from a transport
+ * @session: the session that is receiving the message
+ * @mbuf: a buffer containing the message payload
+ * @service_id: the id of the service that the message was addressed to
+ *
+ * This routine will return 0 if the buffer was accepted, or a negative value
+ * otherwise. In the latter case the caller should free the buffer. If the
+ * error is fatal, this routine will reset the service.
+ *
+ * This routine may be called from interrupt context.
+ *
+ * The caller must always serialise calls to this function relative to
+ * vs_session_handle_reset and vs_session_handle_activate. We don't do this
+ * internally, to avoid having to disable interrupts when called from task
+ * context.
+ */
+int vs_session_handle_message(struct vs_session_device *session,
+		struct vs_mbuf *mbuf, vs_service_id_t service_id)
+{
+	struct vs_service_device *service;
+	struct vs_transport *transport;
+	unsigned long flags;
+
+	transport = session->transport;
+
+	/* Takes a reference; dropped on every exit path below. */
+	service = vs_session_get_service(session, service_id);
+	if (!service) {
+		/* Unknown service ID is treated as a fatal session error. */
+		dev_err(&session->dev, "message for unknown service %d\n",
+				service_id);
+		session_fatal_error(session, GFP_ATOMIC);
+		return -ENOTCONN;
+	}
+
+	/*
+	 * Take the rx lock before checking service readiness. This guarantees
+	 * that if __reset_service() has just made the service inactive, we
+	 * either see it and don't enqueue the message, or else enqueue the
+	 * message before cancel_pending_rx() runs (and removes it).
+	 */
+	spin_lock_irqsave(&service->rx_lock, flags);
+
+	/* If the service is not active, drop the message. */
+	if (service->readiness != VS_SERVICE_ACTIVE) {
+		spin_unlock_irqrestore(&service->rx_lock, flags);
+		vs_put_service(service);
+		return -ECONNRESET;
+	}
+
+	/* Ownership of the mbuf passes to the service's rx queue here. */
+	list_add_tail(&mbuf->queue, &service->rx_queue);
+	spin_unlock_irqrestore(&service->rx_lock, flags);
+
+	/* Schedule processing of the message by the service's drivers. */
+	queue_rx_work(service);
+	vs_put_service(service);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vs_session_handle_message);
+
+/**
+ * vs_session_quota_available - notify a service that it can transmit
+ * @session: the session owning the service that is ready
+ * @service_id: the id of the service that is ready
+ * @count: the number of buffers that just became ready
+ * @send_tx_ready: true if quota has just become nonzero due to a buffer being
+ *                 freed by the remote communication partner
+ *
+ * This routine is called by the transport driver when a send-direction
+ * message buffer becomes free. It wakes up any task that is waiting for
+ * send quota to become available.
+ *
+ * This routine may be called from interrupt context from the transport
+ * driver, and as such, it may not sleep.
+ *
+ * The caller must always serialise calls to this function relative to
+ * vs_session_handle_reset and vs_session_handle_activate. We don't do this
+ * internally, to avoid having to disable interrupts when called from task
+ * context.
+ *
+ * If the send_tx_ready argument is true, this function also schedules a
+ * call to the driver's tx_ready callback. Note that this never has priority
+ * over handling incoming messages; it will only be handled once the receive
+ * queue is empty. This is to increase batching of outgoing messages, and also
+ * to reduce the chance that an outgoing message will be dropped by the partner
+ * because an incoming message has already changed the state.
+ *
+ * In general, task context drivers should use the waitqueue, and softirq
+ * context drivers (with tx_atomic set) should use tx_ready.
+ */
+void vs_session_quota_available(struct vs_session_device *session,
+		vs_service_id_t service_id, unsigned count,
+		bool send_tx_ready)
+{
+	struct vs_service_device *service;
+	unsigned long flags;
+
+	service = vs_session_get_service(session, service_id);
+	if (!service) {
+		dev_err(&session->dev, "tx ready for unknown service %d\n",
+				service_id);
+		session_fatal_error(session, GFP_ATOMIC);
+		return;
+	}
+
+	/* Wake at most @count waiters; one per freed buffer. */
+	wake_up_nr(&service->quota_wq, count);
+
+	if (send_tx_ready) {
+		/*
+		 * Take the rx lock before checking service readiness. This
+		 * guarantees that if __reset_service() has just made the
+		 * service inactive, we either see it and don't set the tx_ready
+		 * flag, or else set the flag before cancel_pending_rx() runs
+		 * (and clears it).
+		 */
+		spin_lock_irqsave(&service->rx_lock, flags);
+
+		/* If the service is not active, drop the tx_ready event */
+		if (service->readiness != VS_SERVICE_ACTIVE) {
+			spin_unlock_irqrestore(&service->rx_lock, flags);
+			vs_put_service(service);
+			return;
+		}
+
+		service->tx_ready = true;
+		spin_unlock_irqrestore(&service->rx_lock, flags);
+
+		/* Schedule RX processing by the service driver. */
+		queue_rx_work(service);
+	}
+
+	vs_put_service(service);
+}
+EXPORT_SYMBOL_GPL(vs_session_quota_available);
+
+/**
+ * vs_session_handle_notify - process an incoming notification from a transport
+ * @session: the session that is receiving the notification
+ * @bits: notification flags
+ * @service_id: the id of the service that the notification was addressed to
+ *
+ * This function may be called from interrupt context from the transport driver,
+ * and as such, it may not sleep.
+ */
+void vs_session_handle_notify(struct vs_session_device *session,
+		unsigned long bits, vs_service_id_t service_id)
+{
+	struct vs_service_device *service;
+	struct vs_service_driver *driver;
+	unsigned long flags;
+
+	service = vs_session_get_service(session, service_id);
+	if (!service) {
+		/* Ignore the notification since the service id doesn't exist */
+		dev_err(&session->dev, "notification for unknown service %d\n",
+				service_id);
+		return;
+	}
+
+	/*
+	 * Take the rx lock before checking service readiness. This guarantees
+	 * that if __reset_service() has just made the service inactive, we
+	 * either see it and don't send the notification, or else send it
+	 * before cancel_pending_rx() runs (and thus before the driver is
+	 * deactivated).
+	 */
+	spin_lock_irqsave(&service->rx_lock, flags);
+
+	/* If the service is not active, drop the notification. */
+	if (service->readiness != VS_SERVICE_ACTIVE) {
+		spin_unlock_irqrestore(&service->rx_lock, flags);
+		vs_put_service(service);
+		return;
+	}
+
+	/* There should be a driver bound on the service */
+	if (WARN_ON(!service->dev.driver)) {
+		spin_unlock_irqrestore(&service->rx_lock, flags);
+		vs_put_service(service);
+		return;
+	}
+
+	/*
+	 * Call the driver's notify function. Note that this runs with the
+	 * rx spinlock held and interrupts disabled, so the notify callback
+	 * must not sleep.
+	 */
+	driver = to_vs_service_driver(service->dev.driver);
+	driver->notify(service, bits);
+
+	spin_unlock_irqrestore(&service->rx_lock, flags);
+	vs_put_service(service);
+}
+EXPORT_SYMBOL_GPL(vs_session_handle_notify);
+
+/*
+ * Length of the reset-throttle cool-off period, in jiffies: a fixed
+ * multiple of the service's current ready delay.
+ */
+static unsigned long reset_cool_off(struct vs_service_device *service)
+{
+	return service->reset_delay * RESET_THROTTLE_COOL_OFF_MULT;
+}
+
+/*
+ * Returns true if the next ready must be throttled, i.e. the service reset
+ * too soon after it last became ready. last_reset and last_ready are
+ * jiffies timestamps; both must be nonzero (i.e. recorded at least once)
+ * for throttling to apply.
+ */
+static bool ready_needs_delay(struct vs_service_device *service)
+{
+	/*
+	 * We throttle resets if too little time elapsed between the service
+	 * last becoming ready, and the service last starting a reset.
+	 *
+	 * We do not use the current time here because it includes the time
+	 * taken by the local service driver to actually process the reset.
+	 */
+	return service->last_reset && service->last_ready && time_before(
+			service->last_reset,
+			service->last_ready + RESET_THROTTLE_TIME);
+}
+
+/*
+ * Returns true if an active reset throttle (reset_delay != 0) can now be
+ * cleared because no reset has been requested for a full cool-off period.
+ */
+static bool reset_throttle_cooled_off(struct vs_service_device *service)
+{
+	/*
+	 * Reset throttling cools off if enough time has elapsed since the
+	 * last reset request.
+	 *
+	 * We check against the last requested reset, not the last serviced
+	 * reset or ready. If we are throttling, a reset may not have been
+	 * serviced for some time even though we are still receiving requests.
+	 */
+	return service->reset_delay && service->last_reset_request &&
+			time_after(jiffies, service->last_reset_request +
+					reset_cool_off(service));
+}
+
+/*
+ * Queue up the ready work for a service. If a service is resetting too fast
+ * then it will be throttled using an exponentially increasing delay before
+ * marking it ready. If the reset speed backs off then the ready throttling
+ * will be cleared. If a service reaches the maximum throttling delay then all
+ * resets will be ignored until the cool off period has elapsed.
+ *
+ * The basic logic of the reset throttling is:
+ *
+ *  - If a reset request is processed and the last ready was less than
+ *    RESET_THROTTLE_TIME ago, then the ready needs to be delayed to
+ *    throttle resets.
+ *
+ *  - The ready delay increases exponentially on each throttled reset
+ *    between RESET_THROTTLE_MIN and RESET_THROTTLE_MAX.
+ *
+ *  - If RESET_THROTTLE_MAX is reached then no ready will be sent until the
+ *    reset requests have cooled off.
+ *
+ *  - Reset requests have cooled off when no reset requests have been
+ *    received for RESET_THROTTLE_COOL_OFF_MULT * the service's current
+ *    ready delay. The service's reset throttling is disabled.
+ *
+ * Note: Be careful when adding print statements, including debugging, to
+ * this function. The ready throttling is intended to prevent DOSing of the
+ * vServices due to repeated resets (e.g. because of a persistent failure).
+ * Adding a printk on each reset for example would result in syslog spamming
+ * which is a DOS attack in itself.
+ *
+ * The ready lock must be held by the caller.
+ */
+static void queue_ready_work(struct vs_service_device *service)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+	unsigned long delay;
+	bool wait_for_cooloff = false;
+
+	lockdep_assert_held(&service->ready_lock);
+
+	/* This should only be called when the service enters reset. */
+	WARN_ON(service->readiness != VS_SERVICE_RESET);
+
+	if (ready_needs_delay(service)) {
+		/* Reset delay increments exponentially */
+		if (!service->reset_delay) {
+			service->reset_delay = RESET_THROTTLE_MIN;
+		} else if (service->reset_delay < RESET_THROTTLE_MAX) {
+			service->reset_delay *= 2;
+		} else {
+			/* Delay is maxed out; stop servicing resets. */
+			wait_for_cooloff = true;
+		}
+
+		delay = service->reset_delay;
+	} else {
+		/* The reset request appears to have been sane. */
+		delay = 0;
+
+	}
+
+	if (service->reset_delay > 0) {
+		/*
+		 * Schedule cooloff work, to set the reset_delay to 0 if
+		 * the reset requests stop for long enough.
+		 */
+		schedule_delayed_work(&service->cooloff_work,
+				reset_cool_off(service));
+	}
+
+	if (wait_for_cooloff) {
+		/*
+		 * We need to finish cooling off before we service resets
+		 * again. Schedule cooloff_work to run after the current
+		 * cooloff period ends; it may reschedule itself even later
+		 * if any more requests arrive.
+		 */
+		dev_err(&session->dev,
+				"Service %s is resetting too fast - must cool off for %u ms\n",
+				dev_name(&service->dev),
+				jiffies_to_msecs(reset_cool_off(service)));
+		return;
+	}
+
+	if (delay)
+		dev_err(&session->dev,
+				"Service %s is resetting too fast - delaying ready by %u ms\n",
+				dev_name(&service->dev),
+				jiffies_to_msecs(delay));
+
+	vs_debug(VS_DEBUG_SESSION, session,
+			"Service %s will become ready in %u ms\n",
+			dev_name(&service->dev),
+			jiffies_to_msecs(delay));
+
+	if (service->last_ready)
+		vs_debug(VS_DEBUG_SESSION, session,
+				"Last became ready %u ms ago\n",
+				msecs_ago(service->last_ready));
+	if (service->reset_delay >= RESET_THROTTLE_MAX)
+		dev_err(&session->dev, "Service %s hit max reset throttle\n",
+				dev_name(&service->dev));
+
+	/* The ready work makes the service ready after the throttle delay. */
+	schedule_delayed_work(&service->ready_work, delay);
+}
+
+/*
+ * Deferred handler for session activation-state changes, scheduled by
+ * vs_session_handle_reset() and vs_session_handle_activate(). Runs the
+ * core service's activation or reset according to the current value of
+ * session->activation_state.
+ */
+static void session_activation_work(struct work_struct *work)
+{
+	struct vs_session_device *session = container_of(work,
+			struct vs_session_device, activation_work);
+	struct vs_service_device *core_service = session->core_service;
+	struct vs_session_driver *session_drv =
+			to_vs_session_driver(session->dev.driver);
+	int activation_state;
+	int ret;
+
+	if (WARN_ON(!core_service))
+		return;
+
+	if (WARN_ON(!session_drv))
+		return;
+
+	/*
+	 * We use an atomic to prevent duplicate activations if we race with
+	 * an activate after a reset. This is very unlikely, but possible if
+	 * this work item is preempted.
+	 */
+	activation_state = atomic_cmpxchg(&session->activation_state,
+			VS_SESSION_ACTIVATE, VS_SESSION_ACTIVE);
+
+	/* activation_state holds the value seen *before* the cmpxchg. */
+	switch (activation_state) {
+	case VS_SESSION_ACTIVATE:
+		vs_debug(VS_DEBUG_SESSION, session,
+				"core service will be activated\n");
+		vs_service_enable(core_service);
+		break;
+
+	case VS_SESSION_RESET:
+		vs_debug(VS_DEBUG_SESSION, session,
+				"core service will be deactivated\n");
+
+		/* Handle the core service reset */
+		ret = service_handle_reset(session, core_service, true);
+
+		/* Tell the transport if the reset succeeded */
+		if (ret >= 0)
+			session->transport->vt->ready(session->transport);
+		else
+			dev_err(&session->dev, "core service reset unhandled: %d\n",
+					ret);
+
+		break;
+
+	default:
+		/* Already VS_SESSION_ACTIVE: nothing to do. */
+		vs_debug(VS_DEBUG_SESSION, session,
+				"core service already active\n");
+		break;
+	}
+}
+
+/**
+ * vs_session_handle_reset - Handle a reset at the session layer.
+ * @session: Session to reset
+ *
+ * This function is called by the transport when it receives a transport-level
+ * reset notification.
+ *
+ * After a session is reset by calling this function, it will reset all of its
+ * attached services, and then call the transport's ready callback. The
+ * services will remain in reset until the session is re-activated by a call
+ * to vs_session_handle_activate().
+ *
+ * Calling this function on a session that is already reset is permitted, as
+ * long as the transport accepts the consequent duplicate ready callbacks.
+ *
+ * A newly created session is initially in the reset state, and will not call
+ * the transport's ready callback. The transport may choose to either act as
+ * if the ready callback had been called, or call this function again to
+ * trigger a new ready callback.
+ */
+void vs_session_handle_reset(struct vs_session_device *session)
+{
+	/* The actual reset is performed by session_activation_work(). */
+	atomic_set(&session->activation_state, VS_SESSION_RESET);
+
+	schedule_work(&session->activation_work);
+}
+EXPORT_SYMBOL_GPL(vs_session_handle_reset);
+
+/**
+ * vs_session_handle_activate - Allow a session to leave the reset state.
+ * @session: Session to mark active.
+ *
+ * This function is called by the transport when a transport-level reset is
+ * completed; that is, after the session layer has reset its services and
+ * called the ready callback, at *both* ends of the connection.
+ */
+void vs_session_handle_activate(struct vs_session_device *session)
+{
+	/* The actual activation is performed by session_activation_work(). */
+	atomic_set(&session->activation_state, VS_SESSION_ACTIVATE);
+
+	schedule_work(&session->activation_work);
+}
+EXPORT_SYMBOL_GPL(vs_session_handle_activate);
+
+/* Sysfs "id" attribute: report the session's numeric ID. */
+static ssize_t id_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%d\n",
+			to_vs_session_device(dev)->session_num);
+}
+
+/*
+ * Sysfs attribute accessors for vServices session devices.
+ */
+static ssize_t is_server_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	/* 1 if this session is the server end, 0 if it is the client end. */
+	return scnprintf(buf, PAGE_SIZE, "%d\n",
+			to_vs_session_device(dev)->is_server);
+}
+
+/* Sysfs "name" attribute: report the session's transport-supplied name. */
+static ssize_t name_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%s\n",
+			to_vs_session_device(dev)->name);
+}
+
+#ifdef CONFIG_VSERVICES_DEBUG
+/* Sysfs "debug_mask" attribute: show the session's debug bit mask. */
+static ssize_t debug_mask_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct vs_session_device *session = to_vs_session_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%.8lx\n", session->debug_mask);
+}
+
+/*
+ * Sysfs "debug_mask" attribute: set the session's debug bit mask.
+ * Accepts any kstrtoul-parseable value; unknown bits are silently ignored.
+ */
+static ssize_t debug_mask_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct vs_session_device *session = to_vs_session_device(dev);
+	unsigned long mask;
+	int err;
+
+	/*
+	 * Parse into a local first, rather than writing directly into
+	 * session->debug_mask; this avoids transiently exposing unknown
+	 * bits to concurrent readers before they are masked off.
+	 */
+	err = kstrtoul(buf, 0, &mask);
+	if (err)
+		return err;
+
+	/* Publish with any bits we don't know about already cleared. */
+	session->debug_mask = mask & VS_DEBUG_ALL;
+
+	return count;
+}
+#endif /* CONFIG_VSERVICES_DEBUG */
+
+/*
+ * Sysfs attributes exposed by every session device: id, is_server and name,
+ * plus a writable debug_mask when vServices debugging is configured.
+ */
+static struct device_attribute vservices_session_dev_attrs[] = {
+	__ATTR_RO(id),
+	__ATTR_RO(is_server),
+	__ATTR_RO(name),
+#ifdef CONFIG_VSERVICES_DEBUG
+	__ATTR(debug_mask, S_IRUGO | S_IWUSR,
+			debug_mask_show, debug_mask_store),
+#endif
+	__ATTR_NULL,
+};
+
+/*
+ * Release the session's number back to the global session IDR.
+ * Always returns 0.
+ */
+static int vs_session_free_idr(struct vs_session_device *session)
+{
+	mutex_lock(&vs_session_lock);
+	idr_remove(&session_idr, session->session_num);
+	mutex_unlock(&vs_session_lock);
+	return 0;
+}
+
+/*
+ * Device-model release callback for a session device: runs when the last
+ * reference to the device is dropped. Frees the session number, the
+ * name string, and the session struct itself.
+ */
+static void vs_session_device_release(struct device *dev)
+{
+	struct vs_session_device *session = to_vs_session_device(dev);
+
+	vs_session_free_idr(session);
+
+	kfree(session->name);
+	kfree(session);
+}
+
+/*
+ * The vServices session bus
+ */
+static int vs_session_bus_match(struct device *dev,
+		struct device_driver *driver)
+{
+	struct vs_session_device *sdev = to_vs_session_device(dev);
+	struct vs_session_driver *sdrv = to_vs_session_driver(driver);
+
+	/* A driver binds only to sessions on the same end (server/client). */
+	return sdev->is_server == sdrv->is_server;
+}
+
+/*
+ * Session bus remove callback: tear down the session's core service when
+ * the session driver is unbound. Deleting the core service implicitly
+ * tears down all other services. Always returns 0.
+ */
+static int vs_session_bus_remove(struct device *dev)
+{
+	struct vs_session_device *session = to_vs_session_device(dev);
+	struct vs_service_device *core_service = session->core_service;
+
+	/* Nothing to tear down if the core service was never registered. */
+	if (!core_service)
+		return 0;
+
+	/*
+	 * Abort any pending session activation. We rely on the transport to
+	 * not call vs_session_handle_activate after this point.
+	 */
+	cancel_work_sync(&session->activation_work);
+
+	/* Abort any pending fatal error handling, which is redundant now. */
+	cancel_work_sync(&session->fatal_error_work);
+
+	/*
+	 * Delete the core service. This will implicitly delete everything
+	 * else (in reset on the client side, and in release on the server
+	 * side). The session holds a reference, so this won't release the
+	 * service struct.
+	 */
+	delete_service(core_service);
+
+	/* Now clean up the core service. */
+	session->core_service = NULL;
+
+	/* Matches the get in vs_service_register() */
+	vs_put_service(core_service);
+
+	return 0;
+}
+
+/*
+ * Session bus uevent callback: export the session's end (server/client)
+ * and numeric ID to userspace hotplug handlers. Returns 0 on success or
+ * -ENOMEM if the uevent buffer is full.
+ */
+static int vservices_session_uevent(struct device *dev,
+		struct kobj_uevent_env *env)
+{
+	struct vs_session_device *session = to_vs_session_device(dev);
+
+	dev_dbg(dev, "uevent\n");
+
+	if (add_uevent_var(env, "IS_SERVER=%d", session->is_server))
+		return -ENOMEM;
+
+	if (add_uevent_var(env, "SESSION_ID=%d", session->session_num))
+		return -ENOMEM;
+
+	return 0;
+}
+
+/*
+ * Session bus shutdown callback: reset the underlying transport so the
+ * remote end sees the session go away on system shutdown.
+ */
+static void vservices_session_shutdown(struct device *dev)
+{
+	struct vs_session_device *session = to_vs_session_device(dev);
+
+	dev_dbg(dev, "shutdown\n");
+
+	/* Do a transport reset */
+	session->transport->vt->reset(session->transport);
+}
+
+/*
+ * The vServices session bus type. Session devices are created by transport
+ * drivers (via vs_session_register()) and bind to session drivers matched
+ * by server/client end in vs_session_bus_match().
+ */
+struct bus_type vs_session_bus_type = {
+	.name		= "vservices-session",
+	.match		= vs_session_bus_match,
+	.remove		= vs_session_bus_remove,
+	.dev_attrs	= vservices_session_dev_attrs,
+	.uevent		= vservices_session_uevent,
+	.shutdown	= vservices_session_shutdown,
+};
+EXPORT_SYMBOL_GPL(vs_session_bus_type);
+
+/*
+ * Common code for the vServices client and server buses
+ */
+
+/*
+ * Service bus probe callback: pin the service driver's module, run its
+ * probe() hook, and attempt to start the service. Returns 0 on success,
+ * -ENODEV if the driver module is going away, or the probe() error.
+ */
+int vs_service_bus_probe(struct device *dev)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+	struct vs_service_driver *vsdrv = to_vs_service_driver(dev->driver);
+	struct vs_session_device *session = vs_service_get_session(service);
+	int ret;
+
+	vs_dev_debug(VS_DEBUG_SESSION, session, &service->dev, "probe\n");
+
+	/*
+	 * Increase the reference count on the service driver. We don't allow
+	 * service driver modules to be removed if there are any device
+	 * instances present. The devices must be explicitly removed first.
+	 */
+	if (!try_module_get(vsdrv->driver.owner))
+		return -ENODEV;
+
+	ret = vsdrv->probe(service);
+	if (ret) {
+		/* Probe failed: drop the module reference taken above. */
+		module_put(vsdrv->driver.owner);
+		return ret;
+	}
+
+	service->driver_probed = true;
+
+	try_start_service(service);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vs_service_bus_probe);
+
+/*
+ * Service bus remove callback: reset the service, run the driver's
+ * remove() hook, flush out any in-flight state-lock users, and drop the
+ * module reference taken in vs_service_bus_probe(). Always returns 0.
+ */
+int vs_service_bus_remove(struct device *dev)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+	struct vs_service_driver *vsdrv = to_vs_service_driver(dev->driver);
+	int err = 0;
+
+	reset_service(service);
+
+	/* Prevent reactivation of the driver */
+	service->driver_probed = false;
+
+	/* The driver has now had its reset() callback called; remove it */
+	vsdrv->remove(service);
+
+	/*
+	 * Take the service's state mutex and spinlock. This ensures that any
+	 * thread that is calling vs_state_lock_safe[_bh] will either complete
+	 * now, or see the driver removal and fail, irrespective of which type
+	 * of lock it is using.
+	 */
+	mutex_lock_nested(&service->state_mutex, service->lock_subclass);
+	spin_lock_bh(&service->state_spinlock);
+
+	/* Release all the locks. */
+	spin_unlock_bh(&service->state_spinlock);
+	mutex_unlock(&service->state_mutex);
+	/*
+	 * NOTE(review): ready_lock is unlocked here but is not visibly
+	 * acquired in this function - presumably reset_service() returns
+	 * with it held. Confirm against reset_service()'s locking contract.
+	 */
+	mutex_unlock(&service->ready_lock);
+
+#ifdef CONFIG_VSERVICES_LOCK_DEBUG
+	service->state_spinlock_used = false;
+	service->state_mutex_used = false;
+#endif
+
+	module_put(vsdrv->driver.owner);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(vs_service_bus_remove);
+
+/*
+ * Service bus uevent callback: export the service's end, IDs, name and
+ * protocol to userspace hotplug handlers. Returns 0 on success or -ENOMEM
+ * if the uevent buffer is full.
+ */
+int vs_service_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+	struct vs_session_device *session = vs_service_get_session(service);
+
+	dev_dbg(dev, "uevent\n");
+
+	if (add_uevent_var(env, "IS_SERVER=%d", service->is_server))
+		return -ENOMEM;
+
+	if (add_uevent_var(env, "SERVICE_ID=%d", service->id))
+		return -ENOMEM;
+
+	if (add_uevent_var(env, "SESSION_ID=%d", session->session_num))
+		return -ENOMEM;
+
+	if (add_uevent_var(env, "SERVICE_NAME=%s", service->name))
+		return -ENOMEM;
+
+	/* protocol may be NULL (see vs_service_register); export "" then. */
+	if (add_uevent_var(env, "PROTOCOL=%s", service->protocol ?: ""))
+		return -ENOMEM;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vs_service_bus_uevent);
+
+/*
+ * Create the session's sysfs directory, named "<transport type>:<name>",
+ * under the client or server root as appropriate. Returns 0 on success,
+ * -EINVAL if no transport name was given, or -ENOMEM on allocation failure.
+ */
+static int vs_session_create_sysfs_entry(struct vs_transport *transport,
+		struct vs_session_device *session, bool server,
+		const char *transport_name)
+{
+	struct kobject *parent;
+	char *name;
+
+	if (!transport_name)
+		return -EINVAL;
+
+	name = kasprintf(GFP_KERNEL, "%s:%s", transport->type,
+			transport_name);
+	if (!name)
+		return -ENOMEM;
+
+	parent = server ? vservices_server_root : vservices_client_root;
+	session->sysfs_entry = kobject_create_and_add(name, parent);
+	kfree(name);
+
+	return session->sysfs_entry ? 0 : -ENOMEM;
+}
+
+/*
+ * Allocate a session number from the global session IDR and store it in
+ * session->session_num. Returns 0 on success, -EBUSY if all VS_MAX_SESSIONS
+ * IDs are in use, or another negative errno on allocation failure.
+ */
+static int vs_session_alloc_idr(struct vs_session_device *session)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)
+	/* Old two-step idr API: preload outside the lock, then allocate. */
+	int err, id;
+
+retry:
+	if (!idr_pre_get(&session_idr, GFP_KERNEL))
+		return -ENOMEM;
+
+	mutex_lock(&vs_session_lock);
+	err = idr_get_new_above(&session_idr, session, 0, &id);
+	if (err == 0) {
+		if (id >= VS_MAX_SESSIONS) {
+			/* We are out of session ids */
+			idr_remove(&session_idr, id);
+			mutex_unlock(&vs_session_lock);
+			return -EBUSY;
+		}
+	}
+	mutex_unlock(&vs_session_lock);
+	/* -EAGAIN means the preloaded memory was consumed by a racer. */
+	if (err == -EAGAIN)
+		goto retry;
+	if (err < 0)
+		return err;
+#else
+	/* Modern one-shot API with an exclusive upper bound on the ID. */
+	int id;
+
+	mutex_lock(&vs_session_lock);
+	id = idr_alloc(&session_idr, session, 0, VS_MAX_SESSIONS, GFP_KERNEL);
+	mutex_unlock(&vs_session_lock);
+
+	/* Map "range exhausted" onto -EBUSY for a consistent return code. */
+	if (id == -ENOSPC)
+		return -EBUSY;
+	else if (id < 0)
+		return id;
+#endif
+
+	session->session_num = id;
+	return 0;
+}
+
+/**
+ * vs_session_register - register a vservices session on a transport
+ * @transport: vservices transport that the session will attach to
+ * @parent: device that implements the transport (for sysfs)
+ * @server: true if the session is server-side
+ * @transport_name: name of the transport
+ *
+ * This function is intended to be called from the probe() function of a
+ * transport driver. It sets up a new session device, which then either
+ * performs automatic service discovery (for clients) or creates sysfs nodes
+ * that allow the user to create services (for servers).
+ *
+ * Note that the parent is only used by the driver framework; it is not
+ * directly accessed by the session drivers. Thus, a single transport device
+ * can support multiple sessions, as long as they each have a unique struct
+ * vs_transport.
+ *
+ * Note: This function may sleep, and therefore must not be called from
+ * interrupt context.
+ *
+ * Returns a pointer to the new device, or an error pointer.
+ */
+struct vs_session_device *vs_session_register(struct vs_transport *transport,
+		struct device *parent, bool server, const char *transport_name)
+{
+	struct device *dev;
+	struct vs_session_device *session;
+	int ret = -ENOMEM;
+
+	WARN_ON(!transport);
+
+	session = kzalloc(sizeof(*session), GFP_KERNEL);
+	if (!session)
+		goto fail_session_alloc;
+
+	session->transport = transport;
+	session->is_server = server;
+	session->name = kstrdup(transport_name, GFP_KERNEL);
+	if (!session->name)
+		goto fail_free_session;
+
+	INIT_WORK(&session->activation_work, session_activation_work);
+	INIT_WORK(&session->fatal_error_work, session_fatal_error_work);
+
+#ifdef CONFIG_VSERVICES_DEBUG
+	session->debug_mask = default_debug_mask & VS_DEBUG_ALL;
+#endif
+
+	idr_init(&session->service_idr);
+	mutex_init(&session->service_idr_lock);
+
+	/*
+	 * The session sysfs entry must be created before device_register()
+	 * so that the entry is available while the core service is being
+	 * registered.
+	 */
+	ret = vs_session_create_sysfs_entry(transport, session, server,
+			transport_name);
+	if (ret)
+		goto fail_free_session;
+
+	ret = vs_session_alloc_idr(session);
+	if (ret)
+		goto fail_sysfs_entry;
+
+	dev = &session->dev;
+	dev->parent = parent;
+	dev->bus = &vs_session_bus_type;
+	dev->release = vs_session_device_release;
+	dev_set_name(dev, "vservice:%d", session->session_num);
+
+	/*
+	 * NOTE(review): if device_register() fails, driver-core convention
+	 * is to call put_device() rather than freeing the structure
+	 * directly - confirm the fail_session_map path cannot double-free
+	 * via vs_session_device_release().
+	 */
+	ret = device_register(dev);
+	if (ret) {
+		goto fail_session_map;
+	}
+
+	/* Add a symlink to transport device inside session device sysfs dir */
+	if (parent) {
+		ret = sysfs_create_link(&session->dev.kobj,
+				&parent->kobj, VS_TRANSPORT_SYMLINK_NAME);
+		if (ret) {
+			dev_err(&session->dev,
+					"Error %d creating transport symlink\n",
+					ret);
+			goto fail_session_device_unregister;
+		}
+	}
+
+	return session;
+
+fail_session_device_unregister:
+	device_unregister(&session->dev);
+	kobject_put(session->sysfs_entry);
+	/* Remaining cleanup will be done in vs_session_release */
+	return ERR_PTR(ret);
+fail_session_map:
+	vs_session_free_idr(session);
+fail_sysfs_entry:
+	kobject_put(session->sysfs_entry);
+fail_free_session:
+	kfree(session->name);
+	kfree(session);
+fail_session_alloc:
+	return ERR_PTR(ret);
+}
+/* NOTE(review): other exports in this file use EXPORT_SYMBOL_GPL; confirm
+ * that the non-GPL export here is intentional. */
+EXPORT_SYMBOL(vs_session_register);
+
+/*
+ * vs_session_start - activate a registered session.
+ *
+ * Notifies VS_SESSION_NOTIFY_ADD listeners and then starts the session's
+ * core service.  The core service must already exist; calling this before
+ * it has been created is a bug (WARN_ON).
+ */
+void vs_session_start(struct vs_session_device *session)
+{
+	struct vs_service_device *core_service = session->core_service;
+
+	if (WARN_ON(!core_service))
+		return;
+
+	blocking_notifier_call_chain(&vs_session_notifier_list,
+			VS_SESSION_NOTIFY_ADD, session);
+
+	vs_service_start(core_service);
+}
+EXPORT_SYMBOL_GPL(vs_session_start);
+
+/**
+ * vs_session_unregister - unregister a session device
+ * @session: the session device to unregister
+ *
+ * Reverses vs_session_register(): removes the transport symlink, notifies
+ * VS_SESSION_NOTIFY_REMOVE listeners, unregisters the device and drops the
+ * session's sysfs kobject reference.
+ */
+void vs_session_unregister(struct vs_session_device *session)
+{
+	/* The symlink only exists if a parent device was given */
+	if (session->dev.parent)
+		sysfs_remove_link(&session->dev.kobj, VS_TRANSPORT_SYMLINK_NAME);
+	blocking_notifier_call_chain(&vs_session_notifier_list,
+			VS_SESSION_NOTIFY_REMOVE, session);
+
+	device_unregister(&session->dev);
+
+	kobject_put(session->sysfs_entry);
+}
+EXPORT_SYMBOL_GPL(vs_session_unregister);
+
+/*
+ * Deferred-unbind context.  Holds a reference on the service so that it
+ * cannot disappear before the work item runs.
+ */
+struct service_unbind_work_struct {
+	struct vs_service_device *service;
+	struct work_struct work;
+};
+
+/* Workqueue callback: perform the actual driver unbind and drop the ref. */
+static void service_unbind_work(struct work_struct *work)
+{
+	struct service_unbind_work_struct *unbind_work = container_of(work,
+			struct service_unbind_work_struct, work);
+
+	device_release_driver(&unbind_work->service->dev);
+
+	/* Matches vs_get_service() in vs_session_unbind_driver() */
+	vs_put_service(unbind_work->service);
+	kfree(unbind_work);
+}
+
+/*
+ * vs_session_unbind_driver - asynchronously unbind a service's driver.
+ *
+ * The device_release_driver() call is deferred to a workqueue - presumably
+ * so this can be requested from contexts where a synchronous unbind would
+ * deadlock (TODO confirm against callers).
+ *
+ * Returns 0 on success or -ENOMEM if the work item cannot be allocated.
+ */
+int vs_session_unbind_driver(struct vs_service_device *service)
+{
+	struct service_unbind_work_struct *unbind_work =
+			kmalloc(sizeof(*unbind_work), GFP_KERNEL);
+
+	if (!unbind_work)
+		return -ENOMEM;
+
+	INIT_WORK(&unbind_work->work, service_unbind_work);
+
+	/* Put in service_unbind_work() */
+	unbind_work->service = vs_get_service(service);
+	schedule_work(&unbind_work->work);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vs_session_unbind_driver);
+
+/*
+ * vservices_init - framework initialisation.
+ *
+ * Creates the "vservices" root kobject, registers the session bus type and
+ * initialises devio (character device) support.  Each step is unwound in
+ * reverse order on failure.
+ */
+static int __init vservices_init(void)
+{
+	int r;
+
+	printk(KERN_INFO "vServices Framework 1.0\n");
+
+	vservices_root = kobject_create_and_add("vservices", NULL);
+	if (!vservices_root) {
+		r = -ENOMEM;
+		goto fail_create_root;
+	}
+
+	r = bus_register(&vs_session_bus_type);
+	if (r < 0)
+		goto fail_bus_register;
+
+	r = vs_devio_init();
+	if (r < 0)
+		goto fail_devio_init;
+
+	return 0;
+
+fail_devio_init:
+	bus_unregister(&vs_session_bus_type);
+fail_bus_register:
+	kobject_put(vservices_root);
+fail_create_root:
+	return r;
+}
+
+/* Module teardown: undo vservices_init() in reverse order. */
+static void __exit vservices_exit(void)
+{
+	printk(KERN_INFO "vServices Framework exit\n");
+
+	vs_devio_exit();
+	bus_unregister(&vs_session_bus_type);
+	kobject_put(vservices_root);
+}
+
+subsys_initcall(vservices_init);
+module_exit(vservices_exit);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Session");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/session.h b/drivers/vservices/session.h
new file mode 100644
index 0000000..f51d535
--- /dev/null
+++ b/drivers/vservices/session.h
@@ -0,0 +1,173 @@
+/*
+ * drivers/vservices/session.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Definitions related to the vservices session bus and its client and server
+ * session drivers. The interfaces in this file are implementation details of
+ * the vServices framework and should not be used by transport or service
+ * drivers.
+ */
+
+#ifndef _VSERVICES_SESSION_PRIV_H_
+#define _VSERVICES_SESSION_PRIV_H_
+
+/* Maximum number of sessions allowed */
+#define VS_MAX_SESSIONS 64
+
+#include "debug.h"
+
+/* For use by the core server */
+#define VS_SERVICE_AUTO_ALLOCATE_ID	0xffff
+#define VS_SERVICE_ALREADY_RESET	1
+
+/*
+ * The upper bits of the service id are reserved for transport driver specific
+ * use. The reserve bits are always zeroed out above the transport layer.
+ */
+#define VS_SERVICE_ID_TRANSPORT_BITS	4
+#define VS_SERVICE_ID_TRANSPORT_OFFSET	12
+#define VS_SERVICE_ID_TRANSPORT_MASK ((1 << VS_SERVICE_ID_TRANSPORT_BITS) - 1)
+#define VS_SERVICE_ID_MASK \
+	(~(VS_SERVICE_ID_TRANSPORT_MASK << VS_SERVICE_ID_TRANSPORT_OFFSET))
+
+/* Number of bits needed to represent the service id range as a bitmap. */
+#define VS_SERVICE_ID_BITMAP_BITS \
+	(1 << ((sizeof(vs_service_id_t) * 8) - VS_SERVICE_ID_TRANSPORT_BITS))
+
+/* High service ids are reserved for use by the transport drivers */
+#define VS_SERVICE_ID_RESERVED(x) \
+	((1 << VS_SERVICE_ID_TRANSPORT_OFFSET) - (x))
+
+#define VS_SERVICE_ID_RESERVED_1	VS_SERVICE_ID_RESERVED(1)
+
+/* Name of the session device symlink in service device sysfs directory */
+#define VS_SESSION_SYMLINK_NAME		"session"
+
+/* Name of the transport device symlink in session device sysfs directory */
+#define VS_TRANSPORT_SYMLINK_NAME	"transport"
+
+/* Extract the transport-reserved (upper) bits of a service id. */
+static inline unsigned int
+vs_get_service_id_reserved_bits(vs_service_id_t service_id)
+{
+	return (service_id >> VS_SERVICE_ID_TRANSPORT_OFFSET) &
+			VS_SERVICE_ID_TRANSPORT_MASK;
+}
+
+/* Mask off the transport-reserved bits, leaving the real service id. */
+static inline vs_service_id_t vs_get_real_service_id(vs_service_id_t service_id)
+{
+	return service_id & VS_SERVICE_ID_MASK;
+}
+
+/* Replace the transport-reserved bits of *service_id with @reserved_bits. */
+static inline void vs_set_service_id_reserved_bits(vs_service_id_t *service_id,
+		unsigned int reserved_bits)
+{
+	*service_id &= ~(VS_SERVICE_ID_TRANSPORT_MASK <<
+			VS_SERVICE_ID_TRANSPORT_OFFSET);
+	*service_id |= (reserved_bits & VS_SERVICE_ID_TRANSPORT_MASK) <<
+			VS_SERVICE_ID_TRANSPORT_OFFSET;
+}
+
+extern struct bus_type vs_session_bus_type;
+extern struct kobject *vservices_root;
+extern struct kobject *vservices_server_root;
+extern struct kobject *vservices_client_root;
+
+/**
+ * struct vs_session_driver - Session driver
+ * @driver: Linux device model driver structure
+ * @service_bus: Pointer to either the server or client bus type
+ * @is_server: True if this driver is for a server session, false if it is for
+ * a client session
+ * @service_added: Called when a non-core service is added.
+ * @service_start: Called when a non-core service is started.
+ * @service_local_reset: Called when an active non-core service driver becomes
+ * inactive.
+ * @service_removed: Called when a non-core service is removed.
+ */
+struct vs_session_driver {
+	struct device_driver driver;
+	struct bus_type *service_bus;
+	bool is_server;
+
+	/* These are all called with the core service state lock held. */
+	int (*service_added)(struct vs_session_device *session,
+			struct vs_service_device *service);
+	int (*service_start)(struct vs_session_device *session,
+			struct vs_service_device *service);
+	int (*service_local_reset)(struct vs_session_device *session,
+			struct vs_service_device *service);
+	int (*service_removed)(struct vs_session_device *session,
+			struct vs_service_device *service);
+};
+
+/* Convert an embedded struct device_driver back to its session driver. */
+#define to_vs_session_driver(drv) \
+	container_of(drv, struct vs_session_driver, driver)
+
+/* Service lookup */
+extern struct vs_service_device * vs_session_get_service(
+		struct vs_session_device *session,
+		vs_service_id_t service_id);
+
+/* Service creation & destruction */
+extern struct vs_service_device *
+vs_service_register(struct vs_session_device *session,
+		struct vs_service_device *parent,
+		vs_service_id_t service_id,
+		const char *protocol,
+		const char *name,
+		const void *plat_data);
+
+extern bool vs_service_start(struct vs_service_device *service);
+
+extern int vs_service_delete(struct vs_service_device *service,
+		struct vs_service_device *caller);
+
+extern int vs_service_handle_delete(struct vs_service_device *service);
+
+/* Service reset handling */
+extern int vs_service_handle_reset(struct vs_session_device *session,
+		vs_service_id_t service_id, bool disable);
+extern int vs_service_enable(struct vs_service_device *service);
+
+extern void vs_session_enable_noncore(struct vs_session_device *session);
+extern void vs_session_disable_noncore(struct vs_session_device *session);
+extern void vs_session_delete_noncore(struct vs_session_device *session);
+
+/* Service bus driver management */
+extern int vs_service_bus_probe(struct device *dev);
+extern int vs_service_bus_remove(struct device *dev);
+extern int vs_service_bus_uevent(struct device *dev,
+		struct kobj_uevent_env *env);
+
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+
+extern int vs_devio_init(void);
+extern void vs_devio_exit(void);
+
+extern struct vs_service_device *vs_service_lookup_by_devt(dev_t dev);
+
+extern struct vs_service_driver vs_devio_server_driver;
+extern struct vs_service_driver vs_devio_client_driver;
+
+extern int vservices_cdev_major;
+
+#else /* !CONFIG_VSERVICES_CHAR_DEV */
+
+/* No-op stubs used when CONFIG_VSERVICES_CHAR_DEV is disabled. */
+static inline int vs_devio_init(void)
+{
+	return 0;
+}
+
+static inline void vs_devio_exit(void)
+{
+}
+
+#endif /* !CONFIG_VSERVICES_CHAR_DEV */
+
+#endif /* _VSERVICES_SESSION_PRIV_H_ */
diff --git a/drivers/vservices/skeleton_driver.c b/drivers/vservices/skeleton_driver.c
new file mode 100644
index 0000000..cfbc5df
--- /dev/null
+++ b/drivers/vservices/skeleton_driver.c
@@ -0,0 +1,133 @@
+/*
+ * drivers/vservices/skeleton_driver.c
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Skeleton testing driver for templating vService client/server drivers
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include <vservices/session.h>
+#include <vservices/buffer.h>
+#include <vservices/service.h>
+
+/* Per-service driver state; only a placeholder in this skeleton driver. */
+struct skeleton_info {
+	unsigned dummy;
+};
+
+/* Called when the service enters the running state. */
+static void vs_skeleton_handle_start(struct vs_service_device *service)
+{
+	/* NOTE: Do not change this message - it is used for system testing */
+	dev_info(&service->dev, "skeleton handle_start\n");
+}
+
+/*
+ * Incoming message handler.  The skeleton driver implements no protocol
+ * messages, so every message is logged and rejected with -EBADMSG.
+ */
+static int vs_skeleton_handle_message(struct vs_service_device *service,
+					  struct vs_mbuf *mbuf)
+{
+	/* Fixed log-string typo: was "handle_messasge" */
+	dev_info(&service->dev, "skeleton handle_message\n");
+	return -EBADMSG;
+}
+
+/* Notification handler: log receipt; no further action is taken. */
+static void vs_skeleton_handle_notify(struct vs_service_device *service,
+					  u32 flags)
+{
+	dev_info(&service->dev, "skeleton handle_notify\n");
+}
+
+/* Reset handler: log which end (server/client) and which service reset. */
+static void vs_skeleton_handle_reset(struct vs_service_device *service)
+{
+	dev_info(&service->dev, "skeleton handle_reset %s service %d\n",
+			service->is_server ? "server" : "client", service->id);
+}
+
+/*
+ * Probe: allocate per-service state and attach it as driver data.
+ * Returns 0 on success or -ENOMEM on allocation failure.
+ */
+static int vs_skeleton_probe(struct vs_service_device *service)
+{
+	struct skeleton_info *info;
+	int err = -ENOMEM;
+
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		goto fail;
+
+	dev_set_drvdata(&service->dev, info);
+	return 0;
+
+fail:
+	return err;
+}
+
+/* Remove: free the per-service state allocated in vs_skeleton_probe(). */
+static int vs_skeleton_remove(struct vs_service_device *service)
+{
+	struct skeleton_info *info = dev_get_drvdata(&service->dev);
+
+	dev_info(&service->dev, "skeleton remove\n");
+	kfree(info);
+	return 0;
+}
+
+/* Server-side skeleton driver for the "com.ok-labs.skeleton" protocol. */
+static struct vs_service_driver server_skeleton_driver = {
+	.protocol	= "com.ok-labs.skeleton",
+	.is_server	= true,
+	.probe		= vs_skeleton_probe,
+	.remove		= vs_skeleton_remove,
+	.start		= vs_skeleton_handle_start,
+	.receive	= vs_skeleton_handle_message,
+	.notify		= vs_skeleton_handle_notify,
+	.reset		= vs_skeleton_handle_reset,
+	.driver		= {
+		.name		= "vs-server-skeleton",
+		.owner		= THIS_MODULE,
+		.bus		= &vs_server_bus_type,
+	},
+};
+
+/* Client-side skeleton driver; identical callbacks to the server driver. */
+static struct vs_service_driver client_skeleton_driver = {
+	.protocol	= "com.ok-labs.skeleton",
+	.is_server	= false,
+	.probe		= vs_skeleton_probe,
+	.remove		= vs_skeleton_remove,
+	.start		= vs_skeleton_handle_start,
+	.receive	= vs_skeleton_handle_message,
+	.notify		= vs_skeleton_handle_notify,
+	.reset		= vs_skeleton_handle_reset,
+	.driver		= {
+		.name		= "vs-client-skeleton",
+		.owner		= THIS_MODULE,
+		.bus		= &vs_client_bus_type,
+	},
+};
+
+/*
+ * Register both skeleton drivers.  If the client registration fails, the
+ * server driver is unregistered again so the module loads all-or-nothing.
+ */
+static int __init vs_skeleton_init(void)
+{
+	int ret;
+
+	ret = driver_register(&server_skeleton_driver.driver);
+	if (ret)
+		return ret;
+
+	ret = driver_register(&client_skeleton_driver.driver);
+	if (ret)
+		driver_unregister(&server_skeleton_driver.driver);
+
+	return ret;
+}
+
+/* Module teardown: unregister in the reverse order of vs_skeleton_init(). */
+static void __exit vs_skeleton_exit(void)
+{
+	driver_unregister(&client_skeleton_driver.driver);
+	driver_unregister(&server_skeleton_driver.driver);
+}
+
+module_init(vs_skeleton_init);
+module_exit(vs_skeleton_exit);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Skeleton Client/Server Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/transport.h b/drivers/vservices/transport.h
new file mode 100644
index 0000000..8e5055c
--- /dev/null
+++ b/drivers/vservices/transport.h
@@ -0,0 +1,40 @@
+/*
+ * include/vservices/transport.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file defines the private interface that vServices transport drivers
+ * must provide to the vservices session and protocol layers. The transport,
+ * transport vtable, and message buffer structures are defined in the public
+ * <vservices/transport.h> header.
+ */
+
+#ifndef _VSERVICES_TRANSPORT_PRIV_H_
+#define _VSERVICES_TRANSPORT_PRIV_H_
+
+#include <linux/types.h>
+#include <linux/list.h>
+
+#include <vservices/transport.h>
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+
+/**
+ * struct vs_notify_info - Notification information stored in the transport
+ * @service_id: Service id for this notification info
+ * @offset: Offset into the notification mapping
+ */
+struct vs_notify_info {
+	vs_service_id_t service_id;
+	unsigned offset;
+};
+
+/* Upper bound on services per session; valid ids are 0..VS_MAX_SERVICE_ID. */
+#define VS_MAX_SERVICES		128
+#define VS_MAX_SERVICE_ID	(VS_MAX_SERVICES - 1)
+
+#endif /* _VSERVICES_TRANSPORT_PRIV_H_ */
diff --git a/drivers/vservices/transport/Kconfig b/drivers/vservices/transport/Kconfig
new file mode 100644
index 0000000..cd1c97c
--- /dev/null
+++ b/drivers/vservices/transport/Kconfig
@@ -0,0 +1,7 @@
+#
+# vServices Transport driver configuration
+#
+
+menu "Transport drivers"
+
+endmenu
diff --git a/drivers/vservices/transport/Makefile b/drivers/vservices/transport/Makefile
new file mode 100644
index 0000000..ae1c943
--- /dev/null
+++ b/drivers/vservices/transport/Makefile
@@ -0,0 +1,2 @@
+ccflags-y += -Werror
+ccflags-$(CONFIG_VSERVICES_DEBUG) += -DDEBUG