net: wireless: implement host-target communication with QCA402x

This driver enables support for communication between
an APQ8053 Host and Qualcomm's QCA402x wireless SoC.

Change-Id: I777f89b95094ad1cd72f515bb43a198502d0a12f
Signed-off-by: Evgeniy Borisov <gencho@codeaurora.org>
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index bb2270b..c915ded 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -114,3 +114,5 @@
 source "drivers/net/wireless/cnss_genl/Kconfig"
 
 endif # WLAN
+
+source "drivers/net/wireless/qca402x/Kconfig"
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 917a876..6cf62b66 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -32,3 +32,4 @@
 obj-$(CONFIG_CNSS_UTILS) += cnss_utils/
 obj-$(CONFIG_CNSS_GENL) += cnss_genl/
 obj-$(CONFIG_CNSS_CRYPTO) += cnss_crypto/
+obj-$(CONFIG_QCA402X) += qca402x/
diff --git a/drivers/net/wireless/qca402x/Kconfig b/drivers/net/wireless/qca402x/Kconfig
new file mode 100644
index 0000000..bae2a49
--- /dev/null
+++ b/drivers/net/wireless/qca402x/Kconfig
@@ -0,0 +1,10 @@
+config QCA402X
+	tristate "Qualcomm QCA402X wireless support"
+	default n
+	---help---
+	Software for Qualcomm QCA402x including HIF and HTCA.
+
+	Say Y here if support for Qualcomm's QCA402x wireless SoC
+	via the host-target communication (HTCA) protocol is
+	required. Say N if support for this device is not
+	needed or if unsure.
diff --git a/drivers/net/wireless/qca402x/Makefile b/drivers/net/wireless/qca402x/Makefile
new file mode 100644
index 0000000..c052f73
--- /dev/null
+++ b/drivers/net/wireless/qca402x/Makefile
@@ -0,0 +1,9 @@
+obj-$(CONFIG_QCA402X) += htca_mbox/htca_mbox.o
+obj-$(CONFIG_QCA402X) += htca_mbox/htca_mbox_compl.o
+obj-$(CONFIG_QCA402X) += htca_mbox/htca_mbox_events.o
+obj-$(CONFIG_QCA402X) += htca_mbox/htca_mbox_intr.o
+obj-$(CONFIG_QCA402X) += htca_mbox/htca_mbox_recv.o
+obj-$(CONFIG_QCA402X) += htca_mbox/htca_mbox_send.o
+obj-$(CONFIG_QCA402X) += htca_mbox/htca_mbox_task.o
+obj-$(CONFIG_QCA402X) += htca_mbox/htca_mbox_utils.o
+obj-$(CONFIG_QCA402X) += hif_sdio/hif.o
diff --git a/drivers/net/wireless/qca402x/README.txt b/drivers/net/wireless/qca402x/README.txt
new file mode 100644
index 0000000..50873a8
--- /dev/null
+++ b/drivers/net/wireless/qca402x/README.txt
@@ -0,0 +1,52 @@
+This directory contains support to communicate between an APQ8053 Host
+and Qualcomm's QCA402x wireless SoC.
+
+QCA4020 SoC supports
+    802.11 (i.e. WiFi/WLAN)
+    802.15.4 (i.e. Zigbee, Thread)
+    BT LE
+
+Contents of this directory may eventually include:
+	cfg80211 support
+	SoftMAC wireless driver
+	Perhaps a mac80211 driver
+	Zigbee APIs
+	Thread APIs
+	BT APIs
+
+For now, only the bottommost layers of a communication stack are present:
+
+	HTCA - Host/Target Communications protocol
+		htca_mbox
+		    Quartz SDIO/SPI address space
+		    Quartz mailboxes and associated SDIO/SPI registers
+		    Quartz mbox credit-based flow control
+		htca_uart (TBD)
+
+	HIF - a shim layer which abstracts the underlying Master/Host-side
+		interconnect controller (e.g. SDIO controller) to provide
+		an interconnect-independent API for use by HTCA.
+		hif_sdio
+			Host Interface layer for SDIO Master controllers
+		hif_spi (TBD)
+			Host Interface layer for SPI Master controllers
+		hif_uart (TBD)
+			Host Interface layer for UART-based controllers
+
+	qrtzdev - a simple driver used for HTCA testing.
+
+Note: The initial implementation supports HTCA Protocol Version 1 over SDIO.
+It is based on previous HTCA implementations for Atheros SoCs, but uses a
+revised design which appropriately leverages kernel threads.
+
+This implementation is likely to evolve with increasing focus on performance,
+especially for use cases of current interest such as streaming video from
+Host over SDIO to WLAN; however, this evolution may differ from the existing
+implementation of HTCA Protocol Version 2 used by earlier Atheros SoCs.
+
+That existing HTCA v2 implementation has several issues:
+  it is based on the HTCA v2 protocol, which adds complexity
+  it is based on a non-threaded design, originally for a non-threaded RTOS
+TBD: Ideally, these two implementations ought to be merged so that the resulting
+implementation is based on a proper threaded design and supports both HTCA
+protocol v1 and v2.
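+
+As a rough illustration (not code that is part of this patch), an upper layer
+such as HTCA is expected to drive the HIF layer along these lines; the names
+match the prototypes in hif_sdio/hif.h, while the callback bodies and contexts
+are placeholders:
+
+	static int my_dsr(void *context) { return 0; }
+	static int my_rw_done(void *rw_context, int status) { return 0; }
+
+	static int my_dev_inserted(void *context, void *hif_handle)
+	{
+		struct cbs_from_hif cbs = {
+			.rw_completion_hdl = my_rw_done,
+			.dsr_hdl = my_dsr,
+		};
+
+		/* claim the device, attach callbacks, then allow interrupts */
+		hif_claim_device(hif_handle, context);
+		hif_attach(hif_handle, &cbs);
+		hif_un_mask_interrupt(hif_handle);
+		return HIF_OK;
+	}
+
+	static int my_module_init(void)
+	{
+		/* register OS-side callbacks; probe calls my_dev_inserted */
+		static struct cbs_from_os os_cbs = {
+			.dev_inserted_hdl = my_dev_inserted,
+		};
+
+		return hif_init(&os_cbs) == HIF_OK ? 0 : -ENODEV;
+	}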
diff --git a/drivers/net/wireless/qca402x/hif_sdio/hif.c b/drivers/net/wireless/qca402x/hif_sdio/hif.c
new file mode 100644
index 0000000..56d3b95
--- /dev/null
+++ b/drivers/net/wireless/qca402x/hif_sdio/hif.c
@@ -0,0 +1,1230 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* This file was originally distributed by Qualcomm Atheros, Inc.
+ * before Copyright ownership was assigned to the Linux Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sd.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "hif_internal.h"
+#include "hif.h"
+
+#if defined(DEBUG)
+#define hifdebug(fmt, a...)\
+	pr_err("hif %s:%d: " fmt, __func__, __LINE__, ##a)
+#else
+#define hifdebug(args...)
+#endif
+
+#define MAX_HIF_DEVICES 2
+#define ENABLE_SDIO_TIMEOUT 100 /* ms */
+
+static unsigned int hif_mmcbuswidth;
+EXPORT_SYMBOL(hif_mmcbuswidth);
+module_param(hif_mmcbuswidth, uint, 0644);
+MODULE_PARM_DESC(hif_mmcbuswidth, "Set MMC driver Bus Width: 1-1Bit, 4-4Bit, 8-8Bit");
+
+static unsigned int hif_mmcclock;
+EXPORT_SYMBOL(hif_mmcclock);
+module_param(hif_mmcclock, uint, 0644);
+MODULE_PARM_DESC(hif_mmcclock, "Set MMC driver Clock value");
+
+static unsigned int hif_writecccr1;
+module_param(hif_writecccr1, uint, 0644);
+static unsigned int hif_writecccr1value;
+module_param(hif_writecccr1value, uint, 0644);
+
+static unsigned int hif_writecccr2;
+module_param(hif_writecccr2, uint, 0644);
+static unsigned int hif_writecccr2value;
+module_param(hif_writecccr2value, uint, 0644);
+
+static unsigned int hif_writecccr3;
+module_param(hif_writecccr3, uint, 0644);
+static unsigned int hif_writecccr3value;
+module_param(hif_writecccr3value, uint, 0644);
+
+static unsigned int hif_writecccr4;
+module_param(hif_writecccr4, uint, 0644);
+
+static unsigned int hif_writecccr4value;
+module_param(hif_writecccr4value, uint, 0644);
+
+static int hif_device_inserted(struct sdio_func *func,
+			       const struct sdio_device_id *id);
+static void hif_device_removed(struct sdio_func *func);
+static void *add_hif_device(struct sdio_func *func);
+static struct hif_device *get_hif_device(struct sdio_func *func);
+static void del_hif_device(struct hif_device *device);
+static int func0_CMD52_write_byte(struct mmc_card *card, unsigned int address,
+				  unsigned char byte);
+static int func0_CMD52_read_byte(struct mmc_card *card, unsigned int address,
+				 unsigned char *byte);
+static void hif_stop_hif_task(struct hif_device *device);
+static struct bus_request *hif_allocate_bus_request(void *device);
+static void hif_free_bus_request(struct hif_device *device,
+				 struct bus_request *busrequest);
+static void hif_add_to_req_list(struct hif_device *device,
+				struct bus_request *busrequest);
+
+static int hif_reset_sdio_on_unload;
+module_param(hif_reset_sdio_on_unload, int, 0644);
+
+static u32 hif_forcedriverstrength = 1; /* force driver strength to type D */
+
+static const struct sdio_device_id hif_sdio_id_table[] = {
+	{SDIO_DEVICE(SDIO_ANY_ID,
+	SDIO_ANY_ID)}, /* QCA402x IDs are hardwired to 0 */
+	{/* null */},
+};
+
+MODULE_DEVICE_TABLE(sdio, hif_sdio_id_table);
+
+static struct sdio_driver hif_sdio_driver = {
+	.name = "hif_sdio",
+	.id_table = hif_sdio_id_table,
+	.probe = hif_device_inserted,
+	.remove = hif_device_removed,
+};
+
+/* make sure we unregister only when registered. */
+/* TBD: synchronization needed.... */
+/* device->completion_task, registered, ... */
+static int registered;
+
+static struct cbs_from_os hif_callbacks;
+
+static struct hif_device *hif_devices[MAX_HIF_DEVICES];
+
+static int hif_disable_func(struct hif_device *device, struct sdio_func *func);
+static int hif_enable_func(struct hif_device *device, struct sdio_func *func);
+
+static int hif_sdio_register_driver(struct cbs_from_os *callbacks)
+{
+	/* store the callback handlers */
+	hif_callbacks = *callbacks; /* structure copy */
+
+	/* Register with bus driver core */
+	registered++;
+
+	return sdio_register_driver(&hif_sdio_driver);
+}
+
+static void hif_sdio_unregister_driver(void)
+{
+	sdio_unregister_driver(&hif_sdio_driver);
+	registered--;
+}
+
+int hif_init(struct cbs_from_os *callbacks)
+{
+	int status;
+
+	hifdebug("Enter\n");
+	if (!callbacks)
+		return HIF_ERROR;
+
+	hifdebug("calling hif_sdio_register_driver\n");
+	status = hif_sdio_register_driver(callbacks);
+	hifdebug("hif_sdio_register_driver returns %d\n", status);
+	if (status != 0)
+		return HIF_ERROR;
+
+	return HIF_OK;
+}
+
+static int __hif_read_write(struct hif_device *device, u32 address,
+			    u8 *buffer, u32 length,
+			    u32 request, void *context)
+{
+	u8 opcode;
+	int status = HIF_OK;
+	int ret = 0;
+	u8 temp[4];
+
+	if (!device || !device->func)
+		return HIF_ERROR;
+
+	if (!buffer)
+		return HIF_EINVAL;
+
+	if (length == 0)
+		return HIF_EINVAL;
+
+	do {
+		if (!(request & HIF_EXTENDED_IO)) {
+			status = HIF_EINVAL;
+			break;
+		}
+
+		if (request & HIF_BLOCK_BASIS) {
+			if (WARN_ON(length & (HIF_MBOX_BLOCK_SIZE - 1)))
+				return HIF_EINVAL;
+		} else if (request & HIF_BYTE_BASIS) {
+		} else {
+			status = HIF_EINVAL;
+			break;
+		}
+
+		if (request & HIF_FIXED_ADDRESS) {
+			opcode = CMD53_FIXED_ADDRESS;
+		} else if (request & HIF_INCREMENTAL_ADDRESS) {
+			opcode = CMD53_INCR_ADDRESS;
+		} else {
+			status = HIF_EINVAL;
+			break;
+		}
+
+		if (request & HIF_WRITE) {
+			if (opcode == CMD53_FIXED_ADDRESS) {
+				/* TBD: Why special handling? */
+				if (length == 1) {
+					memset(temp, *buffer, 4);
+					ret = sdio_writesb(device->func,
+							   address, temp, 4);
+				} else {
+					ret =
+					    sdio_writesb(device->func, address,
+							 buffer, length);
+				}
+			} else {
+				ret = sdio_memcpy_toio(device->func, address,
+						       buffer, length);
+			}
+		} else if (request & HIF_READ) {
+			if (opcode == CMD53_FIXED_ADDRESS) {
+				if (length ==
+				    1) { /* TBD: Why special handling? */
+					memset(temp, 0, 4);
+					ret = sdio_readsb(device->func, temp,
+							  address, 4);
+					buffer[0] = temp[0];
+				} else {
+					ret = sdio_readsb(device->func, buffer,
+							  address, length);
+				}
+			} else {
+				ret = sdio_memcpy_fromio(device->func, buffer,
+							 address, length);
+			}
+		} else {
+			status = HIF_EINVAL; /* Neither read nor write */
+			break;
+		}
+
+		if (ret) {
+			hifdebug("SDIO op returns %d\n", ret);
+			status = HIF_ERROR;
+		}
+	} while (false);
+
+	return status;
+}
+
+/* Add busrequest to tail of sdio_request request list */
+static void hif_add_to_req_list(struct hif_device *device,
+				struct bus_request *busrequest)
+{
+	unsigned long flags;
+
+	busrequest->next = NULL;
+
+	spin_lock_irqsave(&device->req_qlock, flags);
+	if (device->req_qhead)
+		device->req_qtail->next = (void *)busrequest;
+	else
+		device->req_qhead = busrequest;
+	device->req_qtail = busrequest;
+	spin_unlock_irqrestore(&device->req_qlock, flags);
+}
+
+int hif_sync_read(void *hif_device, u32 address, u8 *buffer,
+		  u32 length, u32 request, void *context)
+{
+	int status;
+	struct hif_device *device = (struct hif_device *)hif_device;
+
+	if (!device || !device->func)
+		return HIF_ERROR;
+
+	sdio_claim_host(device->func);
+	status = __hif_read_write(device, address, buffer, length,
+				  request & ~HIF_SYNCHRONOUS, NULL);
+	sdio_release_host(device->func);
+	return status;
+}
+
+/* Queue a read/write request and optionally wait for it to complete. */
+int hif_read_write(void *hif_device, u32 address, void *buffer,
+		   u32 length, u32 req_type, void *context)
+{
+	struct bus_request *busrequest;
+	int status;
+	struct hif_device *device = (struct hif_device *)hif_device;
+
+	if (!device || !device->func)
+		return HIF_ERROR;
+
+	if (!(req_type & HIF_ASYNCHRONOUS) && !(req_type & HIF_SYNCHRONOUS))
+		return HIF_EINVAL;
+
+	/* Serialize all requests through the reqlist and HIFtask */
+	busrequest = hif_allocate_bus_request(device);
+	if (!busrequest)
+		return HIF_ERROR;
+
+	/* TBD: callers may pass buffers ON THE STACK, especially 4-byte
+	 * buffers. If this is a problem on some platforms/drivers, this
+	 * is one reasonable place to handle it. If potentially using DMA:
+	 * reject large buffers on the stack,
+	 * copy 4-byte buffers,
+	 * allow register writes (no DMA).
+	 */
+
+	busrequest->address = address;
+	busrequest->buffer = buffer;
+	busrequest->length = length;
+	busrequest->req_type = req_type;
+	busrequest->context = context;
+
+	hif_add_to_req_list(device, busrequest);
+	device->hif_task_work = 1;
+	wake_up(&device->hif_wait); /* Notify HIF task */
+
+	if (req_type & HIF_ASYNCHRONOUS)
+		return HIF_PENDING;
+
+	/* Synchronous request -- wait for completion. */
+	wait_for_completion(&busrequest->comp_req);
+	status = busrequest->status;
+	hif_free_bus_request(device, busrequest);
+	return status;
+}
+
+/* add_to_completion_list() - Queue a completed request
+ * @device:    context to the hif device.
+ * @comple: SDIO bus access request.
+ *
+ * This function adds an sdio bus access request to the
+ * completion list.
+ *
+ * Return: None.
+ */
+static void add_to_completion_list(struct hif_device *device,
+				   struct bus_request *comple)
+{
+	unsigned long flags;
+
+	comple->next = NULL;
+
+	spin_lock_irqsave(&device->compl_qlock, flags);
+	if (device->compl_qhead)
+		device->compl_qtail->next = (void *)comple;
+	else
+		device->compl_qhead = comple;
+
+	device->compl_qtail = comple;
+	spin_unlock_irqrestore(&device->compl_qlock, flags);
+}
+
+/* process_completion_list() - Remove completed requests from
+ * the completion list, and invoke the corresponding callbacks.
+ *
+ * @device:  HIF device handle.
+ *
+ * Function to clean the completion list.
+ *
+ * Return: None.
+ */
+static void process_completion_list(struct hif_device *device)
+{
+	unsigned long flags;
+	struct bus_request *next_comple;
+	struct bus_request *request;
+
+	/* Pull the entire chain of completions from the list */
+	spin_lock_irqsave(&device->compl_qlock, flags);
+	request = device->compl_qhead;
+	device->compl_qhead = NULL;
+	device->compl_qtail = NULL;
+	spin_unlock_irqrestore(&device->compl_qlock, flags);
+
+	while (request) {
+		int status;
+		void *context;
+
+		hifdebug("HIF top of loop\n");
+		next_comple = (struct bus_request *)request->next;
+
+		status = request->status;
+		context = request->context;
+		hif_free_bus_request(device, request);
+		device->cbs_from_hif.rw_completion_hdl(context, status);
+
+		request = next_comple;
+	}
+}
+
+/* completion_task() - Thread to process request completions
+ *
+ * @param:   context to the hif device.
+ *
+ * Completed asynchronous requests are added to a completion
+ * queue where they are processed by this task. This serves
+ * multiple purposes:
+ * -minimizes processing by the HIFTask, which allows
+ *	that task to keep SDIO busy
+ * -allows request processing to be parallelized on
+ *	multiprocessor systems
+ * -provides a suspendable context for use by the
+ *	caller's callback function, though this should
+ *	not be abused since it will cause requests to
+ *	sit on the completion queue (which makes us
+ *	more likely to exhaust free requests).
+ *
+ * Return: 0 when the thread exits
+ */
+static int completion_task(void *param)
+{
+	struct hif_device *device;
+
+	device = (struct hif_device *)param;
+	set_current_state(TASK_INTERRUPTIBLE);
+
+	for (;;) {
+		hifdebug("HIF top of loop\n");
+		wait_event_interruptible(device->completion_wait,
+					 device->completion_work);
+		if (!device->completion_work)
+			break;
+
+		if (device->completion_shutdown)
+			break;
+
+		device->completion_work = 0;
+		process_completion_list(device);
+	}
+
+	/* Process any remaining completions.
+	 * This task should not be shut down
+	 * until after all requests are stopped.
+	 */
+	process_completion_list(device);
+
+	complete_and_exit(&device->completion_exit, 0);
+	return 0;
+}
+
+/* hif_request_complete() - Completion processing after a request
+ * is processed.
+ *
+ * @device:    device handle.
+ * @request:   SDIO bus access request.
+ *
+ * All completed requests are queued onto a completion list
+ * which is processed by complete_task.
+ *
+ * Return: None.
+ */
+static inline void hif_request_complete(struct hif_device *device,
+					struct bus_request *request)
+{
+	add_to_completion_list(device, request);
+	device->completion_work = 1;
+	wake_up(&device->completion_wait);
+}
+
+/* hif_stop_completion_thread() - Destroy the completion task
+ * @device: device handle.
+ *
+ * This function will destroy the completion thread.
+ *
+ * Return: None.
+ */
+static inline void hif_stop_completion_thread(struct hif_device *device)
+{
+	if (device->completion_task) {
+		init_completion(&device->completion_exit);
+		device->completion_shutdown = 1;
+
+		device->completion_work = 1;
+		wake_up(&device->completion_wait);
+		wait_for_completion(&device->completion_exit);
+		device->completion_task = NULL;
+	}
+}
+
+/* This task tries to keep the SDIO bus as busy as it
+ * can. It pulls requests off the request queue and
+ * uses the underlying SDIO API to carry them out.
+ *
+ * Requests may be one of
+ * synchronous (a thread is suspended until it completes)
+ * asynchronous (a completion callback will be invoked)
+ * and one of
+ * reads (from Target SDIO space into Host RAM)
+ * writes (from Host RAM into Target SDIO space)
+ * and each is directed to one of
+ * Target's mailbox space
+ * Target's register space
+ * among other choices.
+ */
+static int hif_task(void *param)
+{
+	struct hif_device *device;
+	struct bus_request *request;
+	int status;
+	unsigned long flags;
+
+	set_user_nice(current, -3);
+	device = (struct hif_device *)param;
+	set_current_state(TASK_INTERRUPTIBLE);
+
+	for (;;) {
+		hifdebug("top of loop\n");
+		/* wait for work */
+		wait_event_interruptible(device->hif_wait,
+					 device->hif_task_work);
+		if (!device->hif_task_work)
+			/* interrupted, exit */
+			break;
+
+		if (device->hif_shutdown)
+			break;
+
+		device->hif_task_work = 0;
+
+		/* We want to hold the host over multiple cmds if possible;
+		 * but holding the host blocks card interrupts.
+		 */
+		sdio_claim_host(device->func);
+
+		for (;;) {
+			hifdebug("pull next request\n");
+			/* Pull the next request to work on */
+			spin_lock_irqsave(&device->req_qlock, flags);
+			request = device->req_qhead;
+			if (!request) {
+				spin_unlock_irqrestore(&device->req_qlock,
+						       flags);
+				break;
+			}
+
+			/* Remove request from queue */
+			device->req_qhead = (struct bus_request *)request->next;
+			/* Note: No need to clean up req_qtail */
+
+			spin_unlock_irqrestore(&device->req_qlock, flags);
+
+			/* call __hif_read_write to do the work */
+			hifdebug("before HIFRW: address=0x%08x buffer=0x%pK\n",
+				 request->address, request->buffer);
+			hifdebug("before HIFRW: length=%d req_type=0x%08x\n",
+				 request->length, request->req_type);
+
+			if (request->req_type & HIF_WRITE) {
+				int i;
+				int dbgcount;
+
+				if (request->length <= 16)
+					dbgcount = request->length;
+				else
+					dbgcount = 16;
+
+				for (i = 0; i < dbgcount; i++)
+					hifdebug("|0x%02x", request->buffer[i]);
+				hifdebug("\n");
+			}
+			status = __hif_read_write(
+			    device, request->address, request->buffer,
+			    request->length,
+			    request->req_type & ~HIF_SYNCHRONOUS, NULL);
+			hifdebug("after HIFRW: address=0x%08x buffer=0x%pK\n",
+				 request->address, request->buffer);
+			hifdebug("after HIFRW: length=%d req_type=0x%08x\n",
+				 request->length, request->req_type);
+
+			if (request->req_type & HIF_READ) {
+				int i;
+				int dbgcount;
+
+				if (request->length <= 16)
+					dbgcount = request->length;
+				else
+					dbgcount = 16;
+
+				for (i = 0; i < dbgcount; i++)
+					hifdebug("|0x%02x", request->buffer[i]);
+				hifdebug("\n");
+			}
+
+			/* When we return, the read/write is done */
+			request->status = status;
+
+			if (request->req_type & HIF_ASYNCHRONOUS)
+				hif_request_complete(device, request);
+			else
+				/* notify thread that's waiting on this request
+				 */
+				complete(&request->comp_req);
+		}
+		sdio_release_host(device->func);
+	}
+
+	complete_and_exit(&device->hif_exit, 0);
+	return 0;
+}
+
+int hif_configure_device(void *hif_device,
+			 enum hif_device_config_opcode opcode,
+			 void *config, u32 config_len)
+{
+	int status = HIF_OK;
+	struct hif_device *device = (struct hif_device *)hif_device;
+
+	switch (opcode) {
+	case HIF_DEVICE_GET_MBOX_BLOCK_SIZE:
+		((u32 *)config)[0] = HIF_MBOX0_BLOCK_SIZE;
+		((u32 *)config)[1] = HIF_MBOX1_BLOCK_SIZE;
+		((u32 *)config)[2] = HIF_MBOX2_BLOCK_SIZE;
+		((u32 *)config)[3] = HIF_MBOX3_BLOCK_SIZE;
+		break;
+
+	case HIF_DEVICE_SET_CONTEXT:
+		device->context = config;
+		break;
+
+	case HIF_DEVICE_GET_CONTEXT:
+		if (!config)
+			return HIF_ERROR;
+		*(void **)config = device->context;
+		break;
+
+	default:
+		status = HIF_ERROR;
+	}
+
+	return status;
+}
+
+void hif_shutdown_device(void *device)
+{
+	if (!device) {
+		int i;
+		/* since we are unloading the driver, reset all cards
+		 * in case the SDIO card is externally powered and we
+		 * are unloading the SDIO stack.  This avoids the problem
+		 * when the SDIO stack is reloaded and attempts are made
+		 * to re-enumerate a card that is already enumerated.
+		 */
+
+		/* Unregister with bus driver core */
+		if (registered) {
+			registered = 0;
+			hif_sdio_unregister_driver();
+			WARN_ON(1);
+			return;
+		}
+
+		for (i = 0; i < MAX_HIF_DEVICES; ++i) {
+			if (hif_devices[i] && !hif_devices[i]->func) {
+				del_hif_device(hif_devices[i]);
+				hif_devices[i] = NULL;
+			}
+		}
+	}
+}
+
+static void hif_irq_handler(struct sdio_func *func)
+{
+	int status;
+	struct hif_device *device;
+
+	device = get_hif_device(func);
+	device->irq_handling = 1;
+	/* release the host during ints so we can pick it back up when we
+	 * process cmds
+	 */
+	sdio_release_host(device->func);
+	status = device->cbs_from_hif.dsr_hdl(device->cbs_from_hif.context);
+	sdio_claim_host(device->func);
+	device->irq_handling = 0;
+}
+
+static void hif_force_driver_strength(struct sdio_func *func)
+{
+	unsigned int addr = SDIO_CCCR_DRIVE_STRENGTH;
+	unsigned char value = 0;
+
+	if (func0_CMD52_read_byte(func->card, addr, &value))
+		goto cmd_fail;
+
+	value = (value & (~(SDIO_DRIVE_DTSx_MASK << SDIO_DRIVE_DTSx_SHIFT))) |
+			SDIO_DTSx_SET_TYPE_D;
+	if (func0_CMD52_write_byte(func->card, addr, value))
+		goto cmd_fail;
+
+	addr = CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR;
+	value = 0;
+	if (func0_CMD52_read_byte(func->card, addr, &value))
+		goto cmd_fail;
+
+	value = (value & (~CCCR_SDIO_DRIVER_STRENGTH_ENABLE_MASK)) |
+			CCCR_SDIO_DRIVER_STRENGTH_ENABLE_A |
+			CCCR_SDIO_DRIVER_STRENGTH_ENABLE_C |
+			CCCR_SDIO_DRIVER_STRENGTH_ENABLE_D;
+	if (func0_CMD52_write_byte(func->card, addr, value))
+		goto cmd_fail;
+	return;
+cmd_fail:
+	hifdebug("set fail\n");
+}
+
+static int hif_set_mmc_buswidth(struct sdio_func *func,
+				struct hif_device *device)
+{
+	int ret = -1;
+
+	if (hif_mmcbuswidth == 1) {
+		ret = func0_CMD52_write_byte(func->card, SDIO_CCCR_IF,
+					     SDIO_BUS_CD_DISABLE |
+					     SDIO_BUS_WIDTH_1BIT);
+		if (ret)
+			return ret;
+		device->host->ios.bus_width = MMC_BUS_WIDTH_1;
+		device->host->ops->set_ios(device->host, &device->host->ios);
+	} else if (hif_mmcbuswidth == 4 &&
+		   (device->host->caps & MMC_CAP_4_BIT_DATA)) {
+		ret = func0_CMD52_write_byte(func->card, SDIO_CCCR_IF,
+					     SDIO_BUS_CD_DISABLE |
+					     SDIO_BUS_WIDTH_4BIT);
+		if (ret)
+			return ret;
+		device->host->ios.bus_width = MMC_BUS_WIDTH_4;
+		device->host->ops->set_ios(device->host, &device->host->ios);
+	}
+#ifdef SDIO_BUS_WIDTH_8BIT
+	else if (hif_mmcbuswidth == 8 &&
+		 (device->host->caps & MMC_CAP_8_BIT_DATA)) {
+		ret = func0_CMD52_write_byte(func->card, SDIO_CCCR_IF,
+					     SDIO_BUS_CD_DISABLE |
+					     SDIO_BUS_WIDTH_8BIT);
+		if (ret)
+			return ret;
+		device->host->ios.bus_width = MMC_BUS_WIDTH_8;
+		device->host->ops->set_ios(device->host, &device->host->ios);
+	}
+#endif /* SDIO_BUS_WIDTH_8BIT */
+	return ret;
+}
+
+static int hif_device_inserted(struct sdio_func *func,
+			       const struct sdio_device_id *id)
+{
+	int i;
+	int ret = -1;
+	struct hif_device *device = NULL;
+	int count;
+
+	hifdebug("Enter\n");
+
+	/* dma_mask should be populated here.
+	 * Use the parent device's setting.
+	 */
+	func->dev.dma_mask = mmc_dev(func->card->host)->dma_mask;
+
+	if (!add_hif_device(func))
+		return ret;
+	device = get_hif_device(func);
+
+	for (i = 0; i < MAX_HIF_DEVICES; ++i) {
+		if (!hif_devices[i]) {
+			hif_devices[i] = device;
+			break;
+		}
+	}
+	if (WARN_ON(i >= MAX_HIF_DEVICES))
+		return ret;
+
+	device->id = id;
+	device->host = func->card->host;
+	device->is_enabled = false;
+
+	{
+		u32 clock, clock_set = SDIO_CLOCK_FREQUENCY_DEFAULT;
+
+		sdio_claim_host(func);
+
+		/* force driver strength to type D */
+		if (hif_forcedriverstrength == 1)
+			hif_force_driver_strength(func);
+
+		if (hif_writecccr1)
+			(void)func0_CMD52_write_byte(func->card, hif_writecccr1,
+						     hif_writecccr1value);
+		if (hif_writecccr2)
+			(void)func0_CMD52_write_byte(func->card, hif_writecccr2,
+						     hif_writecccr2value);
+		if (hif_writecccr3)
+			(void)func0_CMD52_write_byte(func->card, hif_writecccr3,
+						     hif_writecccr3value);
+		if (hif_writecccr4)
+			(void)func0_CMD52_write_byte(func->card, hif_writecccr4,
+						     hif_writecccr4value);
+		/* Set MMC Clock */
+		if (hif_mmcclock > 0)
+			clock_set = hif_mmcclock;
+		if (mmc_card_hs(func->card))
+			clock = 50000000;
+		else
+			clock = func->card->cis.max_dtr;
+		if (clock > device->host->f_max)
+			clock = device->host->f_max;
+		hifdebug("clock is %d", clock);
+
+		/* only when hif_mmcclock module parameter is specified,
+		 * set the clock explicitly
+		 */
+		if (hif_mmcclock > 0) {
+			device->host->ios.clock = clock_set;
+			device->host->ops->set_ios(device->host,
+						   &device->host->ios);
+		}
+		/* Set MMC Bus Width: 1-1Bit, 4-4Bit, 8-8Bit */
+		if (hif_mmcbuswidth > 0)
+			ret = hif_set_mmc_buswidth(func, device);
+
+		sdio_release_host(func);
+	}
+
+	spin_lock_init(&device->req_free_qlock);
+	spin_lock_init(&device->req_qlock);
+
+	/* Initialize the bus requests to be used later */
+	memset(device->bus_request, 0, sizeof(device->bus_request));
+	for (count = 0; count < BUS_REQUEST_MAX_NUM; count++) {
+		init_completion(&device->bus_request[count].comp_req);
+		hif_free_bus_request(device, &device->bus_request[count]);
+	}
+	init_waitqueue_head(&device->hif_wait);
+	spin_lock_init(&device->compl_qlock);
+	init_waitqueue_head(&device->completion_wait);
+
+	ret = hif_enable_func(device, func);
+	if ((ret == HIF_OK) || (ret == HIF_PENDING)) {
+		hifdebug("Function is ENABLED");
+		return 0;
+	}
+
+	for (i = 0; i < MAX_HIF_DEVICES; i++) {
+		if (hif_devices[i] == device) {
+			hif_devices[i] = NULL;
+			break;
+		}
+	}
+	sdio_set_drvdata(func, NULL);
+	del_hif_device(device);
+	return ret;
+}
+
+void hif_un_mask_interrupt(void *hif_device)
+{
+	struct hif_device *device = (struct hif_device *)hif_device;
+
+	if (!device || !device->func)
+		return;
+
+	/* Unmask our function IRQ */
+	sdio_claim_host(device->func);
+	device->func->card->host->ops->enable_sdio_irq(device->func->card->host,
+						       1);
+	device->is_intr_enb = true;
+	sdio_release_host(device->func);
+}
+
+void hif_mask_interrupt(void *hif_device)
+{
+	struct hif_device *device = (struct hif_device *)hif_device;
+
+	if (!device || !device->func)
+		return;
+
+	/* Mask our function IRQ */
+	sdio_claim_host(device->func);
+	device->func->card->host->ops->enable_sdio_irq(device->func->card->host,
+						       0);
+	device->is_intr_enb = false;
+	sdio_release_host(device->func);
+}
+
+static struct bus_request *hif_allocate_bus_request(void *hif_device)
+{
+	struct bus_request *busrequest;
+	unsigned long flag;
+	struct hif_device *device = (struct hif_device *)hif_device;
+
+	spin_lock_irqsave(&device->req_free_qlock, flag);
+	/* Remove first in list */
+	busrequest = device->bus_req_free_qhead;
+	if (busrequest)
+		device->bus_req_free_qhead =
+			(struct bus_request *)busrequest->next;
+	spin_unlock_irqrestore(&device->req_free_qlock, flag);
+
+	return busrequest;
+}
+
+static void hif_free_bus_request(struct hif_device *device,
+				 struct bus_request *busrequest)
+{
+	unsigned long flag;
+
+	if (!busrequest)
+		return;
+
+	busrequest->next = NULL;
+
+	/* Insert first in list */
+	spin_lock_irqsave(&device->req_free_qlock, flag);
+	busrequest->next = (struct bus_request *)device->bus_req_free_qhead;
+	device->bus_req_free_qhead = busrequest;
+	spin_unlock_irqrestore(&device->req_free_qlock, flag);
+}
+
+static int hif_disable_func(struct hif_device *device, struct sdio_func *func)
+{
+	int ret;
+	int status = HIF_OK;
+
+	device = get_hif_device(func);
+
+	hif_stop_completion_thread(device);
+	hif_stop_hif_task(device);
+
+	/* Disable the card */
+	sdio_claim_host(device->func);
+	ret = sdio_disable_func(device->func);
+	if (ret)
+		status = HIF_ERROR;
+
+	if (hif_reset_sdio_on_unload && (status == HIF_OK)) {
+		/* Reset the SDIO interface.  This is useful in
+		 * automated testing where the card does not need
+		 * to be removed at the end of the test.  It is
+		 * expected that the user will also unload/reload
+		 * the host controller driver to force the bus driver
+		 * to re-enumerate the slot.
+		 */
+
+		/* NOTE : sdio_f0_writeb() cannot be used here, that API only
+		 * allows access to undefined registers in the range of:
+		 * 0xF0-0xFF
+		 */
+
+		ret = func0_CMD52_write_byte(device->func->card,
+					     SDIO_CCCR_ABORT, (1 << 3));
+		if (ret)
+			status = HIF_ERROR;
+	}
+
+	sdio_release_host(device->func);
+
+	if (status == HIF_OK)
+		device->is_enabled = false;
+	return status;
+}
+
+static int hif_enable_func(struct hif_device *device, struct sdio_func *func)
+{
+	int ret = HIF_OK;
+
+	device = get_hif_device(func);
+
+	if (!device)
+		return HIF_EINVAL;
+
+	if (!device->is_enabled) {
+		/* enable the SDIO function */
+		sdio_claim_host(func);
+
+		/* give us some time to enable, in ms */
+		func->enable_timeout = ENABLE_SDIO_TIMEOUT;
+		ret = sdio_enable_func(func);
+		if (ret) {
+			sdio_release_host(func);
+			return HIF_ERROR;
+		}
+		ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE);
+
+		sdio_release_host(func);
+		if (ret)
+			return HIF_ERROR;
+		device->is_enabled = true;
+
+		if (!device->completion_task) {
+			device->compl_qhead = NULL;
+			device->compl_qtail = NULL;
+			device->completion_shutdown = 0;
+			device->completion_task = kthread_create(
+			    completion_task, (void *)device, "HIFCompl");
+			if (IS_ERR(device->completion_task)) {
+				device->completion_shutdown = 1;
+				return HIF_ERROR;
+			}
+			wake_up_process(device->completion_task);
+		}
+
+		/* create HIF I/O thread */
+		if (!device->hif_task) {
+			device->hif_shutdown = 0;
+			device->hif_task =
+			    kthread_create(hif_task, (void *)device, "HIF");
+			if (IS_ERR(device->hif_task)) {
+				device->hif_shutdown = 1;
+				return HIF_ERROR;
+			}
+			wake_up_process(device->hif_task);
+		}
+	}
+
+	if (!device->claimed_context) {
+		ret = hif_callbacks.dev_inserted_hdl(hif_callbacks.context,
+						     device);
+		if (ret != HIF_OK) {
+			/* Disable the SDIO func & Reset the sdio
+			 * for automated tests to move ahead, where
+			 * the card does not need to be removed at
+			 * the end of the test.
+			 */
+			hif_disable_func(device, func);
+		}
+		(void)sdio_claim_irq(func, hif_irq_handler);
+	}
+
+	return ret;
+}
+
+static void hif_device_removed(struct sdio_func *func)
+{
+	int i;
+	int status = HIF_OK;
+	struct hif_device *device;
+
+	device = get_hif_device(func);
+	if (!device)
+		return;
+
+	for (i = 0; i < MAX_HIF_DEVICES; ++i) {
+		if (hif_devices[i] == device)
+			hif_devices[i] = NULL;
+	}
+
+	if (device->claimed_context) {
+		status = hif_callbacks.dev_removed_hdl(
+		    device->claimed_context, device);
+	}
+
+	/* TBD: Release IRQ (opposite of sdio_claim_irq) */
+	hif_mask_interrupt(device);
+
+	if (device->is_enabled)
+		status = hif_disable_func(device, func);
+
+	del_hif_device(device);
+}
+
+static void *add_hif_device(struct sdio_func *func)
+{
+	struct hif_device *hifdevice = NULL;
+
+	if (!func)
+		return NULL;
+
+	hifdevice = kmalloc(sizeof(*hifdevice), GFP_KERNEL);
+	if (!hifdevice)
+		return NULL;
+
+	memset(hifdevice, 0, sizeof(*hifdevice));
+	hifdevice->func = func;
+	sdio_set_drvdata(func, hifdevice);
+
+	return (void *)hifdevice;
+}
+
+static struct hif_device *get_hif_device(struct sdio_func *func)
+{
+	return (struct hif_device *)sdio_get_drvdata(func);
+}
+
+static void del_hif_device(struct hif_device *device)
+{
+	if (!device)
+		return;
+	kfree(device);
+}
+
+void hif_claim_device(void *hif_device, void *context)
+{
+	struct hif_device *device = (struct hif_device *)hif_device;
+
+	device->claimed_context = context;
+}
+
+void hif_release_device(void *hif_device)
+{
+	struct hif_device *device = (struct hif_device *)hif_device;
+
+	device->claimed_context = NULL;
+}
+
+int hif_attach(void *hif_device, struct cbs_from_hif *callbacks)
+{
+	struct hif_device *device = (struct hif_device *)hif_device;
+
+	if (device->cbs_from_hif.context) {
+		/* already in use! */
+		return HIF_ERROR;
+	}
+	device->cbs_from_hif = *callbacks;
+	return HIF_OK;
+}
+
+static void hif_stop_hif_task(struct hif_device *device)
+{
+	if (device->hif_task) {
+		init_completion(&device->hif_exit);
+		device->hif_shutdown = 1;
+		device->hif_task_work = 1;
+		wake_up(&device->hif_wait);
+		wait_for_completion(&device->hif_exit);
+		device->hif_task = NULL;
+	}
+}
+
+/* hif_reset_target() - Reset target device
+ * @struct hif_device: pointer to struct hif_device structure
+ *
+ * Reset the target by invoking power off and power on
+ * sequence to bring back target into active state.
+ * This API shall be called only when driver load/unload
+ * is in progress.
+ *
+ * Return: 0 on success, error for failure case.
+ */
+static int hif_reset_target(struct hif_device *hif_device)
+{
+	int ret;
+
+	if (!hif_device || !hif_device->func || !hif_device->func->card)
+		return -ENODEV;
+	/* Disable sdio func->pull down WLAN_EN-->pull down DAT_2 line */
+	ret = mmc_power_save_host(hif_device->func->card->host);
+	if (ret)
+		goto done;
+
+	/* pull up DAT_2 line->pull up WLAN_EN-->Enable sdio func */
+	ret = mmc_power_restore_host(hif_device->func->card->host);
+
+done:
+	return ret;
+}
+
+void hif_detach(void *hif_device)
+{
+	struct hif_device *device = (struct hif_device *)hif_device;
+
+	hif_stop_hif_task(device);
+	if (device->ctrl_response_timeout) {
+		/* Reset the target by invoking power off and power on sequence
+		 * to the card to bring back into active state.
+		 */
+		if (hif_reset_target(device))
+			panic("BUG");
+		device->ctrl_response_timeout = false;
+	}
+
+	memset(&device->cbs_from_hif, 0, sizeof(device->cbs_from_hif));
+}
+
+#define SDIO_SET_CMD52_ARG(arg, rw, func, raw, address, writedata) \
+	((arg) = ((((rw) & 1) << 31) | (((func) & 0x7) << 28) | \
+		 (((raw) & 1) << 27) | (1 << 26) | \
+		 (((address) & 0x1FFFF) << 9) | (1 << 8) | \
+		 ((writedata) & 0xFF)))
+
+#define SDIO_SET_CMD52_READ_ARG(arg, func, address) \
+	SDIO_SET_CMD52_ARG(arg, 0, (func), 0, address, 0x00)
+#define SDIO_SET_CMD52_WRITE_ARG(arg, func, address, value) \
+	SDIO_SET_CMD52_ARG(arg, 1, (func), 0, address, value)
+
+static int func0_CMD52_write_byte(struct mmc_card *card, unsigned int address,
+				  unsigned char byte)
+{
+	struct mmc_command ioCmd;
+	unsigned long arg;
+	int status;
+
+	memset(&ioCmd, 0, sizeof(ioCmd));
+	SDIO_SET_CMD52_WRITE_ARG(arg, 0, address, byte);
+	ioCmd.opcode = SD_IO_RW_DIRECT;
+	ioCmd.arg = arg;
+	ioCmd.flags = MMC_RSP_R5 | MMC_CMD_AC;
+	status = mmc_wait_for_cmd(card->host, &ioCmd, 0);
+
+	return status;
+}
+
+static int func0_CMD52_read_byte(struct mmc_card *card, unsigned int address,
+				 unsigned char *byte)
+{
+	struct mmc_command ioCmd;
+	unsigned long arg;
+	s32 err;
+
+	memset(&ioCmd, 0, sizeof(ioCmd));
+	SDIO_SET_CMD52_READ_ARG(arg, 0, address);
+	ioCmd.opcode = SD_IO_RW_DIRECT;
+	ioCmd.arg = arg;
+	ioCmd.flags = MMC_RSP_R5 | MMC_CMD_AC;
+
+	err = mmc_wait_for_cmd(card->host, &ioCmd, 0);
+
+	if ((!err) && (byte))
+		*byte = ioCmd.resp[0] & 0xFF;
+
+	return err;
+}
+
+void hif_set_handle(void *hif_handle, void *handle)
+{
+	struct hif_device *device = (struct hif_device *)hif_handle;
+
+	device->caller_handle = handle;
+}
+
+size_t hif_get_device_size(void)
+{
+	return sizeof(struct hif_device);
+}
diff --git a/drivers/net/wireless/qca402x/hif_sdio/hif.h b/drivers/net/wireless/qca402x/hif_sdio/hif.h
new file mode 100644
index 0000000..924d2e4
--- /dev/null
+++ b/drivers/net/wireless/qca402x/hif_sdio/hif.h
@@ -0,0 +1,335 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* This file was originally distributed by Qualcomm Atheros, Inc.
+ * before Copyright ownership was assigned to the Linux Foundation.
+ */
+
+#ifndef _HIF_H_
+#define _HIF_H_
+
+#define DEBUG
+#undef DEBUG
+
+#define HIF_OK 0
+#define HIF_PENDING 1
+#define HIF_ERROR 2
+#define HIF_EINVAL 3
+
+/* direction - Direction of transfer (HIF_READ/HIF_WRITE). */
+#define HIF_READ 0x00000001
+#define HIF_WRITE 0x00000002
+#define HIF_DIR_MASK (HIF_READ | HIF_WRITE)
+
+/* type - An interface may support different kind of read/write commands.
+ * For example: SDIO supports CMD52/CMD53s. In case of MSIO it
+ * translates to using different kinds of TPCs. The command type
+ * is thus divided into a basic and an extended command and can
+ * be specified using HIF_BASIC_IO/HIF_EXTENDED_IO.
+ */
+#define HIF_BASIC_IO 0x00000004
+#define HIF_EXTENDED_IO 0x00000008
+#define HIF_TYPE_MASK (HIF_BASIC_IO | HIF_EXTENDED_IO)
+
+/* emode - This indicates whether the command is to be executed in a
+ * blocking or non-blocking fashion (HIF_SYNCHRONOUS/
+ * HIF_ASYNCHRONOUS). The read/write data paths in HTCA have been
+ * implemented using the asynchronous mode, allowing the bus
+ * driver to indicate the completion of an operation through the
+ * registered callback routine. The requirement primarily comes
+ * from the contexts these operations get called from (a driver's
+ * transmit context or the ISR context in case of receive).
+ * Support for both of these modes is essential.
+ */
+#define HIF_SYNCHRONOUS 0x00000010
+#define HIF_ASYNCHRONOUS 0x00000020
+#define HIF_EMODE_MASK (HIF_SYNCHRONOUS | HIF_ASYNCHRONOUS)
+
+/* dmode - An interface may support different kinds of commands based on
+ * the tradeoff between the amount of data it can carry and the
+ * setup time. Byte and Block modes are supported (HIF_BYTE_BASIS/
+ * HIF_BLOCK_BASIS). In case of latter, the data is rounded off
+ * to the nearest block size by padding. The size of the block is
+ * configurable at compile time using the HIF_BLOCK_SIZE and is
+ * negotiated with the target during initialization after the
+ * AR6000 interrupts are enabled.
+ */
+#define HIF_BYTE_BASIS 0x00000040
+#define HIF_BLOCK_BASIS 0x00000080
+#define HIF_DMODE_MASK (HIF_BYTE_BASIS | HIF_BLOCK_BASIS)
+
+/* amode - This indicates if the address has to be incremented on AR6000
+ * after every read/write operation (HIF_FIXED_ADDRESS/
+ * HIF_INCREMENTAL_ADDRESS).
+ */
+#define HIF_FIXED_ADDRESS 0x00000100
+#define HIF_INCREMENTAL_ADDRESS 0x00000200
+#define HIF_AMODE_MASK (HIF_FIXED_ADDRESS | HIF_INCREMENTAL_ADDRESS)
+
+#define HIF_WR_ASYNC_BYTE_FIX \
+	(HIF_WRITE | HIF_ASYNCHRONOUS | HIF_EXTENDED_IO | HIF_BYTE_BASIS | \
+	 HIF_FIXED_ADDRESS)
+#define HIF_WR_ASYNC_BYTE_INC \
+	(HIF_WRITE | HIF_ASYNCHRONOUS | HIF_EXTENDED_IO | HIF_BYTE_BASIS | \
+	 HIF_INCREMENTAL_ADDRESS)
+#define HIF_WR_ASYNC_BLOCK_INC \
+	(HIF_WRITE | HIF_ASYNCHRONOUS | HIF_EXTENDED_IO | HIF_BLOCK_BASIS | \
+	 HIF_INCREMENTAL_ADDRESS)
+#define HIF_WR_SYNC_BYTE_FIX \
+	(HIF_WRITE | HIF_SYNCHRONOUS | HIF_EXTENDED_IO | HIF_BYTE_BASIS | \
+	 HIF_FIXED_ADDRESS)
+#define HIF_WR_SYNC_BYTE_INC \
+	(HIF_WRITE | HIF_SYNCHRONOUS | HIF_EXTENDED_IO | HIF_BYTE_BASIS | \
+	 HIF_INCREMENTAL_ADDRESS)
+#define HIF_WR_SYNC_BLOCK_INC \
+	(HIF_WRITE | HIF_SYNCHRONOUS | HIF_EXTENDED_IO | HIF_BLOCK_BASIS | \
+	 HIF_INCREMENTAL_ADDRESS)
+#define HIF_WR_ASYNC_BLOCK_FIX \
+	(HIF_WRITE | HIF_ASYNCHRONOUS | HIF_EXTENDED_IO | HIF_BLOCK_BASIS | \
+	 HIF_FIXED_ADDRESS)
+#define HIF_WR_SYNC_BLOCK_FIX \
+	(HIF_WRITE | HIF_SYNCHRONOUS | HIF_EXTENDED_IO | HIF_BLOCK_BASIS | \
+	 HIF_FIXED_ADDRESS)
+#define HIF_RD_SYNC_BYTE_INC \
+	(HIF_READ | HIF_SYNCHRONOUS | HIF_EXTENDED_IO | HIF_BYTE_BASIS | \
+	 HIF_INCREMENTAL_ADDRESS)
+#define HIF_RD_SYNC_BYTE_FIX \
+	(HIF_READ | HIF_SYNCHRONOUS | HIF_EXTENDED_IO | HIF_BYTE_BASIS | \
+	 HIF_FIXED_ADDRESS)
+#define HIF_RD_ASYNC_BYTE_FIX \
+	(HIF_READ | HIF_ASYNCHRONOUS | HIF_EXTENDED_IO | HIF_BYTE_BASIS | \
+	 HIF_FIXED_ADDRESS)
+#define HIF_RD_ASYNC_BLOCK_FIX \
+	(HIF_READ | HIF_ASYNCHRONOUS | HIF_EXTENDED_IO | HIF_BLOCK_BASIS | \
+	 HIF_FIXED_ADDRESS)
+#define HIF_RD_ASYNC_BYTE_INC \
+	(HIF_READ | HIF_ASYNCHRONOUS | HIF_EXTENDED_IO | HIF_BYTE_BASIS | \
+	 HIF_INCREMENTAL_ADDRESS)
+#define HIF_RD_ASYNC_BLOCK_INC \
+	(HIF_READ | HIF_ASYNCHRONOUS | HIF_EXTENDED_IO | HIF_BLOCK_BASIS | \
+	 HIF_INCREMENTAL_ADDRESS)
+#define HIF_RD_SYNC_BLOCK_INC \
+	(HIF_READ | HIF_SYNCHRONOUS | HIF_EXTENDED_IO | HIF_BLOCK_BASIS | \
+	 HIF_INCREMENTAL_ADDRESS)
+#define HIF_RD_SYNC_BLOCK_FIX \
+	(HIF_READ | HIF_SYNCHRONOUS | HIF_EXTENDED_IO | HIF_BLOCK_BASIS | \
+	 HIF_FIXED_ADDRESS)
+
+enum hif_device_config_opcode {
+	HIF_DEVICE_GET_MBOX_BLOCK_SIZE,
+	HIF_DEVICE_SET_CONTEXT,
+	HIF_DEVICE_GET_CONTEXT,
+};
+
+/* HIF CONFIGURE definitions:
+ *
+ * HIF_DEVICE_GET_MBOX_BLOCK_SIZE
+ * input : none
+ * output : array of 4 u32s
+ * notes: block size is returned for each mailbox (4)
+ *
+ * HIF_DEVICE_SET_CONTEXT
+ * input : arbitrary pointer-sized value
+ * output: none
+ * notes: stores an arbitrary value which can be retrieved later
+ *
+ * HIF_DEVICE_GET_CONTEXT
+ * input: none
+ * output : arbitrary pointer-sized value
+ * notes: retrieves an arbitrary value which was set earlier
+ */
+struct cbs_from_hif {
+	void *context; /* context to pass to dsr_hdl
+			* note: rw_completion_hdl is given the context
+			* passed to hif_read_write
+			*/
+	int (*rw_completion_hdl)(void *rw_context, int status);
+	int (*dsr_hdl)(void *context);
+};
+
+struct cbs_from_os {
+	void *context; /* context to pass for all callbacks except
+			* dev_removed_hdl the dev_removed_hdl is only called if
+			* the device is claimed
+			*/
+	int (*dev_inserted_hdl)(void *context, void *hif_handle);
+	int (*dev_removed_hdl)(void *claimed_context, void *hif_handle);
+	int (*dev_suspend_hdl)(void *context);
+	int (*dev_resume_hdl)(void *context);
+	int (*dev_wakeup_hdl)(void *context);
+#if defined(DEVICE_POWER_CHANGE)
+	int (*dev_pwr_change_hdl)(void *context,
+				  HIF_DEVICE_POWER_CHANGE_TYPE config);
+#endif /* DEVICE_POWER_CHANGE */
+};
+
+/* other interrupts (non-Recv) are pending, host
+ * needs to read the register table to figure out what
+ */
+#define HIF_OTHER_EVENTS BIT(0)
+
+#define HIF_RECV_MSG_AVAIL BIT(1) /* pending recv packet */
+
+struct hif_pending_events_info {
+	u32 events;
+	u32 look_ahead;
+	u32 available_recv_bytes;
+};
+
+/* function to get pending events; some HIF modules use special mechanisms
+ * to detect packet availability and other interrupts
+ */
+typedef int (*HIF_PENDING_EVENTS_FUNC)(void *device,
+				       struct hif_pending_events_info *p_events,
+				       void *async_context);
+
+#define HIF_MASK_RECV TRUE
+#define HIF_UNMASK_RECV FALSE
+/* function to mask recv events */
+typedef int (*HIF_MASK_UNMASK_RECV_EVENT)(void *device, bool mask,
+					  void *async_context);
+
+#ifdef HIF_MBOX_SLEEP_WAR
+/* This API is used to update the target sleep state */
+void hif_set_mbox_sleep(void *device, bool sleep, bool wait,
+			bool cache);
+#endif
+/* This API is used to perform any global initialization of the HIF layer
+ * and to set OS driver callbacks (i.e. insertion/removal) to the HIF layer
+ */
+int hif_init(struct cbs_from_os *callbacks);
+
+/* This API claims the HIF device and provides a context for handling removal.
+ * The device removal callback is only called when the OS claims
+ * a device.  The claimed context must be non-NULL
+ */
+void hif_claim_device(void *device, void *claimed_context);
+
+/* release the claimed device */
+void hif_release_device(void *device);
+
+/* This API allows the calling layer to attach callbacks from HIF */
+int hif_attach(void *device, struct cbs_from_hif *callbacks);
+
+/* This API allows the calling layer to detach callbacks from HIF */
+void hif_detach(void *device);
+
+void hif_set_handle(void *hif_handle, void *handle);
+
+int hif_sync_read(void *device, u32 address, u8 *buffer,
+		  u32 length, u32 request, void *context);
+
+size_t hif_get_device_size(void);
+
+/* This API is used to provide the read/write interface over the specific bus
+ * interface.
+ * address - Starting address in the AR6000's address space. For mailbox
+ * writes, it refers to the start of the mbox boundary. It should
+ * be ensured that the last byte falls on the mailbox's EOM. For
+ * mailbox reads, it refers to the end of the mbox boundary.
+ * buffer - Pointer to the buffer containing the data to be transmitted or
+ * received.
+ * length - Amount of data to be transmitted or received.
+ * request - Characterizes the attributes of the command.
+ */
+int hif_read_write(void *device, u32 address, void *buffer,
+		   u32 length, u32 request, void *context);
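+
+/* Illustrative example (an assumption about typical usage, not mandated by
+ * this header): a synchronous, incrementing-address block write of one
+ * 256-byte block to a mailbox address; 'dev', 'mbox_addr' and 'buf' are
+ * placeholders.
+ *
+ *	status = hif_read_write(dev, mbox_addr, buf, 256,
+ *				HIF_WR_SYNC_BLOCK_INC, NULL);
+ *	if (status != HIF_OK)
+ *		... handle the error ...
+ */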
+
+/* This can be initiated from the driver-unload context when the OS has
+ * no more use for the device.
+ */
+void hif_shutdown_device(void *device);
+void hif_surprise_removed(void *device);
+
+void hif_mask_interrupt(void *device);
+
+void hif_un_mask_interrupt(void *device);
+
+int hif_configure_device(void *device,
+			 enum hif_device_config_opcode opcode,
+			 void *config, u32 config_len);
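+
+/* Illustrative example (not a normative usage): query the per-mailbox
+ * block sizes; 'dev' is a placeholder for the HIF handle delivered via
+ * dev_inserted_hdl.
+ *
+ *	u32 blk[4];
+ *
+ *	if (hif_configure_device(dev, HIF_DEVICE_GET_MBOX_BLOCK_SIZE,
+ *				 blk, sizeof(blk)) == HIF_OK)
+ *		pr_debug("mbox0 block size %u\n", blk[0]);
+ */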
+
+/* This API wait for the remaining MBOX messages to be drained
+ * This should be moved to HTCA AR6K layer
+ */
+int hif_wait_for_pending_recv(void *device);
+
+/* BMI and Diag window abstraction
+ */
+
+#define HIF_BMI_EXCHANGE_NO_TIMEOUT ((u32)(0))
+
+#define DIAG_TRANSFER_LIMIT 2048U /* maximum number of bytes that can be handled
+				   * atomically by DiagRead/DiagWrite
+				   */
+
+#ifdef FEATURE_RUNTIME_PM
+/* Runtime power management API of HIF to control
+ * runtime pm. During Runtime Suspend the get API
+ * return -EAGAIN. The caller can queue the cmd or return.
+ * The put API decrements the usage count.
+ * The get API increments the usage count.
+ * The API's are exposed to HTT and WMI Services only.
+ */
+int hif_pm_runtime_get(void *device);
+int hif_pm_runtime_put(void *device);
+void *hif_runtime_pm_prevent_suspend_init(const char *name);
+void hif_runtime_pm_prevent_suspend_deinit(void *data);
+int hif_pm_runtime_prevent_suspend(void *ol_sc, void *data);
+int hif_pm_runtime_allow_suspend(void *ol_sc, void *data);
+int hif_pm_runtime_prevent_suspend_timeout(void *ol_sc, void *data,
+					   unsigned int delay);
+void hif_request_runtime_pm_resume(void *ol_sc);
+#else
+static inline int hif_pm_runtime_get(void *device)
+{
+	return 0;
+}
+
+static inline int hif_pm_runtime_put(void *device)
+{
+	return 0;
+}
+
+static inline int hif_pm_runtime_prevent_suspend(void *ol_sc, void *context)
+{
+	return 0;
+}
+
+static inline int hif_pm_runtime_allow_suspend(void *ol_sc, void *context)
+{
+	return 0;
+}
+
+static inline int hif_pm_runtime_prevent_suspend_timeout(void *ol_sc,
+							 void *context,
+							 unsigned int msec)
+{
+	return 0;
+}
+
+static inline void *hif_runtime_pm_prevent_suspend_init(const char *name)
+{
+	return NULL;
+}
+
+static inline void hif_runtime_pm_prevent_suspend_deinit(void *context)
+{
+}
+
+static inline void hif_request_runtime_pm_resume(void *ol_sc)
+{
+}
+#endif
+
+#endif /* _HIF_H_ */
diff --git a/drivers/net/wireless/qca402x/hif_sdio/hif_internal.h b/drivers/net/wireless/qca402x/hif_sdio/hif_internal.h
new file mode 100644
index 0000000..8b4c11e
--- /dev/null
+++ b/drivers/net/wireless/qca402x/hif_sdio/hif_internal.h
@@ -0,0 +1,117 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* This file was originally distributed by Qualcomm Atheros, Inc.
+ * before Copyright ownership was assigned to the Linux Foundation.
+ */
+
+#ifndef _HIF_INTERNAL_H_
+#define _HIF_INTERNAL_H_
+
+#include "hif.h"
+#include "hif_sdio_common.h"
+
+/* Make this large enough to avoid ever failing due to lack of bus requests.
+ * A number that accounts for the total number of credits on the Target plus
+ * outstanding register requests is good.
+ *
+ * FUTURE: could dynamically allocate busrequest structs as needed.
+ * FUTURE: would be nice for HIF to use HTCA's htca_request. Seems
+ * wasteful to use multiple structures -- one for HTCA and another
+ * for HIF -- and to copy info from one to the other. Maybe should
+ * semi-merge these layers?
+ */
+#define BUS_REQUEST_MAX_NUM 128
+
+#define SDIO_CLOCK_FREQUENCY_DEFAULT 25000000 /* TBD: Can support 50000000
+					       * on real HW?
+					       */
+#define SDWLAN_ENABLE_DISABLE_TIMEOUT 20
+#define FLAGS_CARD_ENAB 0x02
+#define FLAGS_CARD_IRQ_UNMSK 0x04
+
+/* The block size is an attribute of the SDIO function which is
+ * shared by all four mailboxes. We cannot support per-mailbox
+ * block sizes over SDIO.
+ */
+#define HIF_MBOX_BLOCK_SIZE HIF_DEFAULT_IO_BLOCK_SIZE
+#define HIF_MBOX0_BLOCK_SIZE HIF_MBOX_BLOCK_SIZE
+#define HIF_MBOX1_BLOCK_SIZE HIF_MBOX_BLOCK_SIZE
+#define HIF_MBOX2_BLOCK_SIZE HIF_MBOX_BLOCK_SIZE
+#define HIF_MBOX3_BLOCK_SIZE HIF_MBOX_BLOCK_SIZE
+
+struct bus_request {
+	/*struct bus_request*/ void *next; /* linked list of available requests */
+	struct completion comp_req;
+	u32 address; /* request data */
+	u8 *buffer;
+	u32 length;
+	u32 req_type;
+	void *context;
+	int status;
+};
+
+struct hif_device {
+	struct sdio_func *func;
+
+	/* Main HIF task */
+	struct task_struct *hif_task; /* task to handle SDIO requests */
+	wait_queue_head_t hif_wait;
+	int hif_task_work;	    /* Signals HIFtask that there is work */
+	int hif_shutdown;	    /* signals HIFtask to stop */
+	struct completion hif_exit; /* HIFtask completion */
+
+	/* HIF Completion task */
+	/* task to handle SDIO completions */
+	struct task_struct *completion_task;
+	wait_queue_head_t completion_wait;
+	int completion_work;
+	int completion_shutdown;
+	struct completion completion_exit;
+
+	/* pending request queue */
+	spinlock_t req_qlock;
+	struct bus_request *req_qhead; /* head of request queue */
+	struct bus_request *req_qtail; /* tail of request queue */
+
+	/* completed request queue */
+	spinlock_t compl_qlock;
+	struct bus_request *compl_qhead;
+	struct bus_request *compl_qtail;
+
+	/* request free list */
+	spinlock_t req_free_qlock;
+	struct bus_request *bus_req_free_qhead; /* free queue */
+
+	/* Space for requests, initially queued to busRequestFreeQueue */
+	struct bus_request bus_request[BUS_REQUEST_MAX_NUM];
+
+	void *claimed_context;
+	struct cbs_from_hif
+	    cbs_from_hif; /* Callbacks made from HIF to caller */
+	bool is_enabled;  /* device is currently enabled? */
+	bool is_intr_enb; /* interrupts are currently unmasked at
+			   * Host - dbg only
+			   */
+	int irq_handling; /* currently processing interrupts */
+	const struct sdio_device_id *id;
+	struct mmc_host *host;
+	void *context;
+	bool ctrl_response_timeout;
+	/* for debug; links hif device back to caller (e.g.HTCA target) */
+	void *caller_handle;
+};
+
+#define CMD53_FIXED_ADDRESS 1
+#define CMD53_INCR_ADDRESS 2
+
+#endif /* _HIF_INTERNAL_H_ */
diff --git a/drivers/net/wireless/qca402x/hif_sdio/hif_sdio_common.h b/drivers/net/wireless/qca402x/hif_sdio/hif_sdio_common.h
new file mode 100644
index 0000000..c325c06
--- /dev/null
+++ b/drivers/net/wireless/qca402x/hif_sdio/hif_sdio_common.h
@@ -0,0 +1,43 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* This file was originally distributed by Qualcomm Atheros, Inc.
+ * before Copyright ownership was assigned to the Linux Foundation.
+ */
+
+#ifndef _HIF_SDIO_COMMON_H_
+#define _HIF_SDIO_COMMON_H_
+
+/* The purpose of these blocks is to amortize SDIO command setup time
+ * across multiple bytes of data. In byte mode, we must issue a command
+ * for each byte. In block mode, we issue a command (8B?) for each
+ * BLOCK_SIZE bytes.
+ *
+ * Every mailbox read/write must be padded to this block size. If the
+ * value is too large, we spend more time sending padding bytes over
+ * SDIO. If the value is too small we see less benefit from amortizing
+ * the cost of a command across data bytes.
+ */
+#define HIF_DEFAULT_IO_BLOCK_SIZE 256
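+
+/* Illustrative only: with the default 256-byte block size, a 13-byte
+ * mailbox message sent in block mode is padded out to one full block,
+ * i.e. round_up(13, HIF_DEFAULT_IO_BLOCK_SIZE) == 256 bytes on the bus.
+ */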
+
+#define FIFO_TIMEOUT_AND_CHIP_CONTROL 0x00000868
+#define FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF 0xFFFEFFFF
+#define FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_ON 0x10000
+
+/* Vendor Specific Driver Strength Settings */
+#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR 0xf2
+#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_MASK 0x0e
+#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_A 0x02
+#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_C 0x04
+#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_D 0x08
+
+#endif /* _HIF_SDIO_COMMON_H_ */
diff --git a/drivers/net/wireless/qca402x/htca_mbox/htca.h b/drivers/net/wireless/qca402x/htca_mbox/htca.h
new file mode 100644
index 0000000..ce2e0eb
--- /dev/null
+++ b/drivers/net/wireless/qca402x/htca_mbox/htca.h
@@ -0,0 +1,132 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* Host-Target Communication API */
+
+#ifndef _HTCA_H_
+#define _HTCA_H_
+
+#define DEBUG
+#undef DEBUG
+
+/* The HTCA API is independent of the underlying interconnect and
+ * independent of the protocols used across that interconnect.
+ */
+
+#define HTCA_OK 0        /* Success */
+#define HTCA_ERROR 1     /* generic error */
+#define HTCA_EINVAL 2    /* Invalid parameter */
+#define HTCA_ECANCELED 3 /* Operation canceled */
+#define HTCA_EPROTO 4    /* Protocol error */
+#define HTCA_ENOMEM 5    /* Memory exhausted */
+
+/* Note: An Endpoint ID is always Interconnect-relative. So we
+ * are likely to see the same Endpoint ID with different Targets
+ * on a multi-Target system.
+ */
+#define HTCA_EP_UNUSED (0xff)
+
+#define HTCA_EVENT_UNUSED 0
+
+/* Start global events */
+#define HTCA_EVENT_GLOBAL_START 1
+#define HTCA_EVENT_TARGET_AVAILABLE 1
+#define HTCA_EVENT_TARGET_UNAVAILABLE 2
+#define HTCA_EVENT_GLOBAL_END 2
+#define HTCA_EVENT_GLOBAL_COUNT                                                \
+		(HTCA_EVENT_GLOBAL_END - HTCA_EVENT_GLOBAL_START + 1)
+/* End global events */
+
+/* Start endpoint-specific events */
+#define HTCA_EVENT_EP_START 3
+#define HTCA_EVENT_BUFFER_RECEIVED 3
+#define HTCA_EVENT_BUFFER_SENT 4
+#define HTCA_EVENT_DATA_AVAILABLE 5
+#define HTCA_EVENT_EP_END 5
+#define HTCA_EVENT_EP_COUNT (HTCA_EVENT_EP_END - HTCA_EVENT_EP_START + 1)
+/* End endpoint-specific events */
+
+/* Maximum size of an HTCA header across relevant implementations
+ * (e.g. across interconnect types and platforms and OSes of interest).
+ *
+ * Callers of HTCA must leave HTCA_HEADER_LEN_MAX bytes reserved
+ * BEFORE the start of a buffer passed to htca_buffer_send and
+ * AT the start of a buffer passed to htca_buffer_receive,
+ * for use by HTCA itself.
+ *
+ * FUTURE: Investigate ways to remove the need for callers to accommodate
+ * HTCA headers. It doesn't seem that hard to do: just tack on the
+ * length in a separate buffer and send buffer pairs to HIF. When
+ * extracting, first pull the header, then pull the payload into paired
+ * buffers.
+ */
+
+#define HTCA_HEADER_LEN_MAX 2
+
+struct htca_event_info {
+	u8 *buffer;
+	void *cookie;
+	u32 buffer_length;
+	u32 actual_length;
+	int status;
+};
+
+typedef void (*htca_event_handler)(void *target,
+				   u8 ep,
+				   u8 event_id,
+				   struct htca_event_info *event_info,
+				   void *context);
+
+int htca_init(void);
+
+void htca_shutdown(void);
+
+int htca_start(void *target);
+
+void htca_stop(void *target);
+
+int htca_event_reg(void *target,
+		   u8 end_point_id,
+		   u8 event_id,
+		   htca_event_handler event_handler, void *context);
+
+/* Notes:
+ * buffer should be multiple of blocksize.
+ * buffer should be large enough for header+largest message, rounded up to
+ * blocksize.
+ * buffer passed in should be start of the buffer -- where header will go.
+ * length should be full length, including header.
+ * On completion, buffer points to start of payload (AFTER header).
+ * On completion, actual_length is the length of payload. Does not include
+ * header nor padding. On completion, buffer_length matches the length that
+ * was passed in here.
+ */
+int htca_buffer_receive(void *target,
+			u8 end_point_id, u8 *buffer,
+			u32 length, void *cookie);
+
+/* Notes:
+ * buffer should be multiple of blocksize.
+ * buffer passed in should be start of payload; header will be tacked on BEFORE
+ * this.
+ * extra bytes will be sent, padding the message to blocksize.
+ * length should be the number of payload bytes to be sent.
+ * The actual number of bytes that go over SDIO is length+header, rounded up to
+ * blocksize.
+ * On completion, buffer points to start of payload (AFTER header).
+ * On completion, actual_length is the length of payload. Does not include
+ * header nor padding. On completion buffer_length is irrelevant.
+ */
+int htca_buffer_send(void *target,
+		     u8 end_point_id,
+		     u8 *buffer, u32 length, void *cookie);
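+
+/* Typical call sequence (illustrative; details depend on the caller):
+ * htca_init(), then register a handler for HTCA_EVENT_TARGET_AVAILABLE.
+ * When a Target shows up, call htca_start(target). For each endpoint,
+ * register BUFFER_SENT, BUFFER_RECEIVED and DATA_AVAILABLE handlers
+ * with htca_event_reg() before posting empty buffers with
+ * htca_buffer_receive() or sending with htca_buffer_send(). On
+ * teardown, call htca_stop(target) and finally htca_shutdown().
+ */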
+
+#endif /* _HTCA_H_ */
diff --git a/drivers/net/wireless/qca402x/htca_mbox/htca_mbox.c b/drivers/net/wireless/qca402x/htca_mbox/htca_mbox.c
new file mode 100644
index 0000000..fbf3549
--- /dev/null
+++ b/drivers/net/wireless/qca402x/htca_mbox/htca_mbox.c
@@ -0,0 +1,497 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* Implementation of Host Target Communication
+ * API v1 and HTCA Protocol v1
+ * over Qualcomm QCA mailbox-based SDIO/SPI interconnects.
+ */
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "../hif_sdio/hif.h"
+#include "htca.h"
+#include "htca_mbox_internal.h"
+
+struct htca_target *htca_target_list[HTCA_NUM_DEVICES_MAX];
+
+/* Single thread module initialization, module shutdown,
+ * target start and target stop.
+ */
+static DEFINE_MUTEX(htca_startup_mutex);
+static bool htca_initialized;
+
+/* Initialize the HTCA software module.
+ * Typically invoked exactly once.
+ */
+int htca_init(void)
+{
+	struct cbs_from_os callbacks;
+
+	if (mutex_lock_interruptible(&htca_startup_mutex))
+		return HTCA_ERROR; /* interrupted */
+
+	if (htca_initialized) {
+		mutex_unlock(&htca_startup_mutex);
+		return HTCA_OK; /* Already initialized */
+	}
+
+	htca_initialized = true;
+
+	htca_event_table_init();
+
+	memset(&callbacks, 0, sizeof(callbacks));
+	callbacks.dev_inserted_hdl = htca_target_inserted_handler;
+	callbacks.dev_removed_hdl = htca_target_removed_handler;
+	hif_init(&callbacks);
+
+	mutex_unlock(&htca_startup_mutex);
+
+	return HTCA_OK;
+}
+
+/* Shutdown the entire module and free all module data.
+ * Inverse of htca_init.
+ *
+ * May be invoked only after all Targets are stopped.
+ */
+void htca_shutdown(void)
+{
+	int i;
+
+	if (mutex_lock_interruptible(&htca_startup_mutex))
+		return; /* interrupted */
+
+	if (!htca_initialized) {
+		mutex_unlock(&htca_startup_mutex);
+		return; /* Not initialized, so nothing to shut down */
+	}
+
+	for (i = 0; i < HTCA_NUM_DEVICES_MAX; i++) {
+		if (htca_target_instance(i)) {
+			/* One or more Targets are still active --
+			 * cannot shutdown software.
+			 */
+			mutex_unlock(&htca_startup_mutex);
+			WARN_ON(1);
+			return;
+		}
+	}
+
+	hif_shutdown_device(NULL); /* Tell HIF that we're all done */
+	htca_initialized = false;
+
+	mutex_unlock(&htca_startup_mutex);
+}
+
+/* Start a Target. This typically happens once per Target after
+ * the module has been initialized and a Target is powered on.
+ *
+ * When a Target starts, it posts a single credit to each mailbox
+ * and enters "HTCA configuration". During configuration negotiation,
+ * a block size that both Host and Target agree on is established for
+ * each HTCA endpoint. Once this is complete, the Target starts normal
+ * operation and can send/receive.
+ */
+int htca_start(void *tar)
+{
+	int status;
+	u32 address;
+	struct htca_target *target = (struct htca_target *)tar;
+
+	mutex_lock(&htca_startup_mutex);
+
+	if (!htca_initialized) {
+		mutex_unlock(&htca_startup_mutex);
+		return HTCA_ERROR;
+	}
+
+	init_waitqueue_head(&target->target_init_wait);
+
+	/* Unmask Host controller interrupts associated with this Target */
+	hif_un_mask_interrupt(target->hif_handle);
+
+	/* Enable all interrupts of interest on the Target. */
+
+	target->enb.int_status_enb = INT_STATUS_ENABLE_ERROR_SET(0x01) |
+				     INT_STATUS_ENABLE_CPU_SET(0x01) |
+				     INT_STATUS_ENABLE_COUNTER_SET(0x01) |
+				     INT_STATUS_ENABLE_MBOX_DATA_SET(0x0F);
+
+	target->enb.cpu_int_status_enb = CPU_INT_STATUS_ENABLE_BIT_SET(0x00);
+
+	target->enb.err_status_enb =
+	    ERROR_STATUS_ENABLE_RX_UNDERFLOW_SET(0x01) |
+	    ERROR_STATUS_ENABLE_TX_OVERFLOW_SET(0x01);
+
+	target->enb.counter_int_status_enb =
+	    COUNTER_INT_STATUS_ENABLE_BIT_SET(0xFF);
+
+	/* Commit interrupt register values to Target HW. */
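+	/* (The four 1-byte enable registers are contiguous in the Target's
+	 * register space -- see struct htca_intr_enables -- so all four
+	 * are committed with a single 4-byte write.)
+	 */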
+	address = get_reg_addr(INTR_ENB_REG, ENDPOINT_UNUSED);
+	status =
+	    hif_read_write(target->hif_handle, address, &target->enb,
+			   sizeof(target->enb), HIF_WR_SYNC_BYTE_INC, NULL);
+	if (status != HIF_OK) {
+		_htca_stop(target);
+		mutex_unlock(&htca_startup_mutex);
+		return HTCA_ERROR;
+	}
+
+	/* At this point, we're waiting for the Target to post
+	 * 1 credit to each mailbox. This allows us to begin
+	 * configuration negotiation. We should see an interrupt
+	 * as soon as the first credit is posted. The remaining
+	 * credits should be posted almost immediately after.
+	 */
+
+	/* Wait indefinitely until configuration negotiation with
+	 * the Target completes and the Target tells us it is ready to go.
+	 */
+	if (!target->ready) {
+		/* NB: Retain the htca_startup_mutex during this wait.
+		 * This serializes startup but should be OK.
+		 */
+
+		wait_event_interruptible(target->target_init_wait,
+					 target->ready);
+
+		if (target->ready) {
+			status = HTCA_OK;
+		} else {
+			status = HTCA_ERROR;
+			_htca_stop(target);
+		}
+	}
+
+	mutex_unlock(&htca_startup_mutex);
+	return status;
+}
+
+void _htca_stop(struct htca_target *target)
+{
+	uint ep;
+	struct htca_endpoint *end_point;
+	u32 address;
+
+	/* Note: htca_startup_mutex must be held on entry */
+	if (!htca_initialized)
+		return;
+
+	htca_work_task_stop(target);
+
+	/* Disable interrupts at source, on Target */
+	target->enb.int_status_enb = 0;
+	target->enb.cpu_int_status_enb = 0;
+	target->enb.err_status_enb = 0;
+	target->enb.counter_int_status_enb = 0;
+
+	address = get_reg_addr(INTR_ENB_REG, ENDPOINT_UNUSED);
+
+	/* Try to disable all interrupts on the Target. */
+	(void)hif_read_write(target->hif_handle, address, &target->enb,
+			     sizeof(target->enb), HIF_WR_SYNC_BYTE_INC, NULL);
+
+	/* Disable Host controller interrupts */
+	hif_mask_interrupt(target->hif_handle);
+
+	/* Flush all the queues and return the buffers to their owner */
+	for (ep = 0; ep < HTCA_NUM_MBOX; ep++) {
+		unsigned long flags;
+
+		end_point = &target->end_point[ep];
+
+		spin_lock_irqsave(&end_point->tx_credit_lock, flags);
+		end_point->tx_credits_available = 0;
+		spin_unlock_irqrestore(&end_point->tx_credit_lock, flags);
+
+		end_point->enabled = false;
+
+		/* Flush the Pending Receive Queue */
+		htca_mbox_queue_flush(end_point, &end_point->recv_pending_queue,
+				      &end_point->recv_free_queue,
+				      HTCA_EVENT_BUFFER_RECEIVED);
+
+		/* Flush the Pending Send Queue */
+		htca_mbox_queue_flush(end_point, &end_point->send_pending_queue,
+				      &end_point->send_free_queue,
+				      HTCA_EVENT_BUFFER_SENT);
+	}
+
+	target->ready = false;
+
+	hif_detach(target->hif_handle);
+
+	/* Remove this Target from the global list */
+	htca_target_instance_remove(target);
+
+	/* Free target memory */
+	kfree(target);
+}
+
+void htca_stop(void *tar)
+{
+	struct htca_target *target = (struct htca_target *)tar;
+
+	htca_work_task_stop(target);
+	htca_compl_task_stop(target);
+
+	mutex_lock(&htca_startup_mutex);
+	_htca_stop(target);
+	mutex_unlock(&htca_startup_mutex);
+}
+
+/* Provides an interface for the caller to register for
+ * various events supported by the HTCA module.
+ */
+int htca_event_reg(void *tar,
+		   u8 end_point_id,
+		   u8 event_id,
+		   htca_event_handler event_handler, void *param)
+{
+	int status;
+	struct htca_endpoint *end_point;
+	struct htca_event_info event_info;
+	struct htca_target *target = (struct htca_target *)tar;
+
+	/* Register a new handler BEFORE dispatching events.
+	 * UNregister a handler AFTER dispatching events.
+	 */
+	if (event_handler) {
+		/* Register a new event handler */
+
+		status = htca_add_to_event_table(target, end_point_id, event_id,
+						 event_handler, param);
+		if (status != HTCA_OK)
+			return status; /* Fail to register handler */
+	}
+
+	/* Handle events associated with this handler */
+	switch (event_id) {
+	case HTCA_EVENT_TARGET_AVAILABLE:
+		if (event_handler) {
+			struct htca_target *targ;
+			int i;
+
+			/* Dispatch a Target Available event for all Targets
+			 * that are already present.
+			 */
+			for (i = 0; i < HTCA_NUM_DEVICES_MAX; i++) {
+				targ = htca_target_list[i];
+				if (targ) {
+					size_t size = hif_get_device_size();
+
+					htca_frame_event(&event_info,
+							 (u8 *)targ->hif_handle,
+							 size, size,
+							 HTCA_OK, NULL);
+
+					htca_dispatch_event(
+					    targ, ENDPOINT_UNUSED,
+					    HTCA_EVENT_TARGET_AVAILABLE,
+					    &event_info);
+				}
+			}
+		}
+		break;
+
+	case HTCA_EVENT_TARGET_UNAVAILABLE:
+		break;
+
+	case HTCA_EVENT_BUFFER_RECEIVED:
+		if (!event_handler) {
+			/* Flush the Pending Recv queue before unregistering
+			 * the event handler.
+			 */
+			end_point = &target->end_point[end_point_id];
+			htca_mbox_queue_flush(end_point,
+					      &end_point->recv_pending_queue,
+					      &end_point->recv_free_queue,
+					      HTCA_EVENT_BUFFER_RECEIVED);
+		}
+		break;
+
+	case HTCA_EVENT_BUFFER_SENT:
+		if (!event_handler) {
+			/* Flush the Pending Send queue before unregistering
+			 * the event handler.
+			 */
+			end_point = &target->end_point[end_point_id];
+			htca_mbox_queue_flush(end_point,
+					      &end_point->send_pending_queue,
+					      &end_point->send_free_queue,
+					      HTCA_EVENT_BUFFER_SENT);
+		}
+		break;
+
+	case HTCA_EVENT_DATA_AVAILABLE:
+		/* We could dispatch a data available event. Instead,
+		 * we require users to register this event handler
+		 * before posting receive buffers.
+		 */
+		break;
+
+	default:
+		return HTCA_EINVAL; /* unknown event? */
+	}
+
+	if (!event_handler) {
+		/* Unregister an event handler */
+		status = htca_remove_from_event_table(target,
+						      end_point_id, event_id);
+		if (status != HTCA_OK)
+			return status;
+	}
+
+	return HTCA_OK;
+}
+
+/* Enqueue to the endpoint's recv_pending_queue an empty buffer
+ * which will receive data from the Target.
+ */
+int htca_buffer_receive(void *tar,
+			u8 end_point_id, u8 *buffer,
+			u32 length, void *cookie)
+{
+	struct htca_endpoint *end_point;
+	struct htca_mbox_request *mbox_request;
+	struct htca_event_table_element *ev;
+	unsigned long flags;
+	struct htca_target *target = (struct htca_target *)tar;
+
+	end_point = &target->end_point[end_point_id];
+
+	if (!end_point->enabled)
+		return HTCA_ERROR;
+
+	/* Length must be a multiple of block_size.
+	 * (Ideally, length should match the largest message that can be sent
+	 * over this endpoint, including HTCA header, rounded up to blocksize.)
+	 */
+	if (length % end_point->block_size)
+		return HTCA_EINVAL;
+
+	if (length > HTCA_MESSAGE_SIZE_MAX)
+		return HTCA_EINVAL;
+
+	if (length < HTCA_HEADER_LEN_MAX)
+		return HTCA_EINVAL;
+
+	ev = htca_event_id_to_event(target, end_point_id,
+				    HTCA_EVENT_BUFFER_RECEIVED);
+	if (!ev->handler) {
+		/* In order to use this API, caller must
+		 * register an event handler for HTCA_EVENT_BUFFER_RECEIVED.
+		 */
+		return HTCA_ERROR;
+	}
+
+	spin_lock_irqsave(&end_point->mbox_queue_lock, flags);
+	mbox_request = (struct htca_mbox_request *)htca_request_deq_head(
+	    &end_point->recv_free_queue);
+	spin_unlock_irqrestore(&end_point->mbox_queue_lock, flags);
+	if (!mbox_request)
+		return HTCA_ENOMEM;
+
+	if (WARN_ON(mbox_request->req.target != target))
+		return HTCA_ERROR;
+
+	mbox_request->buffer = buffer;
+	/* includes space for HTCA header */
+	mbox_request->buffer_length = length;
+	/* filled in after message is received */
+	mbox_request->actual_length = 0;
+	mbox_request->end_point = end_point;
+	mbox_request->cookie = cookie;
+
+	spin_lock_irqsave(&end_point->mbox_queue_lock, flags);
+	htca_request_enq_tail(&end_point->recv_pending_queue,
+			      (struct htca_request *)mbox_request);
+	spin_unlock_irqrestore(&end_point->mbox_queue_lock, flags);
+
+	/* Alert the work_task that there may be work to do */
+	htca_work_task_poke(target);
+
+	return HTCA_OK;
+}
+
+/* Enqueue a buffer to be sent to the Target.
+ *
+ * Supplied buffer must be preceded by HTCA_HEADER_LEN_MAX bytes for the
+ * HTCA header (of which HTCA_HEADER_LEN bytes are actually used, and the
+ * remaining are padding).
+ *
+ * Must be followed with sufficient space for block-size padding.
+ *
+ * Example:
+ * To send a 10B message over an endpoint that uses 64B blocks, caller
+ * specifies length=10. HTCA adds HTCA_HEADER_LEN_MAX bytes just before
+ * buffer, consisting of HTCA_HEADER_LEN header bytes followed by
+ * HTCA_HEADER_LEN_MAX-HTCA_HEADER_LEN pad bytes. HTCA sends block_size
+ * bytes starting at buffer-HTCA_HEADER_LEN_MAX.
+ */
+int htca_buffer_send(void *tar,
+		     u8 end_point_id,
+		     u8 *buffer, u32 length, void *cookie)
+{
+	struct htca_endpoint *end_point;
+	struct htca_mbox_request *mbox_request;
+	struct htca_event_table_element *ev;
+	unsigned long flags;
+	struct htca_target *target = (struct htca_target *)tar;
+
+	end_point = &target->end_point[end_point_id];
+
+	if (!end_point->enabled)
+		return HTCA_ERROR;
+
+	if (length + HTCA_HEADER_LEN_MAX > HTCA_MESSAGE_SIZE_MAX)
+		return HTCA_EINVAL;
+
+	ev = htca_event_id_to_event(target, end_point_id,
+				    HTCA_EVENT_BUFFER_SENT);
+	if (!ev->handler) {
+		/* In order to use this API, caller must
+		 * register an event handler for HTCA_EVENT_BUFFER_SENT.
+		 */
+		return HTCA_ERROR;
+	}
+
+	spin_lock_irqsave(&end_point->mbox_queue_lock, flags);
+	mbox_request = (struct htca_mbox_request *)htca_request_deq_head(
+	    &end_point->send_free_queue);
+	spin_unlock_irqrestore(&end_point->mbox_queue_lock, flags);
+	if (!mbox_request)
+		return HTCA_ENOMEM;
+
+	/* Buffer will be adjusted by HTCA_HEADER_LEN later, in
+	 * htca_send_request_to_hif.
+	 */
+	mbox_request->buffer = buffer;
+	mbox_request->buffer_length = length;
+	mbox_request->actual_length = length;
+	mbox_request->end_point = end_point;
+	mbox_request->cookie = cookie;
+
+	spin_lock_irqsave(&end_point->mbox_queue_lock, flags);
+	htca_request_enq_tail(&end_point->send_pending_queue,
+			      (struct htca_request *)mbox_request);
+	spin_unlock_irqrestore(&end_point->mbox_queue_lock, flags);
+
+	/* Alert the work_task that there may be work to do */
+	htca_work_task_poke(target);
+
+	return HTCA_OK;
+}
diff --git a/drivers/net/wireless/qca402x/htca_mbox/htca_mbox_compl.c b/drivers/net/wireless/qca402x/htca_mbox/htca_mbox_compl.c
new file mode 100644
index 0000000..c7f8e953
--- /dev/null
+++ b/drivers/net/wireless/qca402x/htca_mbox/htca_mbox_compl.c
@@ -0,0 +1,503 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/completion.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/spinlock_types.h>
+#include <linux/wait.h>
+
+#include "../hif_sdio/hif.h"
+#include "htca.h"
+#include "htca_mbox_internal.h"
+
+/* Host Target Communications Completion Management */
+
+/* Top-level callback handler, registered with HIF to be invoked
+ * whenever a read/write HIF operation completes. Executed in the
+ * context of an HIF task, so we don't want to take much time
+ * here. Pass processing to HTCA's compl_task.
+ *
+ * Used for both reg_requests and mbox_requests.
+ */
+int htca_rw_completion_handler(void *context, int status)
+{
+	struct htca_request *req;
+	struct htca_target *target;
+	unsigned long flags;
+
+	req = (struct htca_request *)context;
+	if (!context) {
+		/* No completion required for this request.
+		 * (e.g. Fire-and-forget register write.)
+		 */
+		return HTCA_OK;
+	}
+
+	target = req->target;
+	req->status = status;
+
+	/* Enqueue this completed request on the
+	 * Target completion queue.
+	 */
+	spin_lock_irqsave(&target->compl_queue_lock, flags);
+	htca_request_enq_tail(&target->compl_queue, (struct htca_request *)req);
+	spin_unlock_irqrestore(&target->compl_queue_lock, flags);
+
+	/* Notify the completion task that it has work */
+	htca_compl_task_poke(target);
+
+	return HTCA_OK;
+}
+
+/* Request-specific callback invoked by the HTCA Completion Task
+ * when a Mbox Send Request completes. Note: Used for Mbox Send
+ * requests; not used for Reg requests.
+ *
+ * Simply dispatch a BUFFER_SENT event to the originator of the request.
+ */
+void htca_send_compl(struct htca_request *req, int status)
+{
+	struct htca_target *target;
+	u8 end_point_id;
+	struct htca_event_info event_info;
+	struct htca_endpoint *end_point;
+	struct htca_mbox_request *mbox_request =
+			(struct htca_mbox_request *)req;
+	unsigned long flags;
+
+	end_point = mbox_request->end_point;
+	target = end_point->target;
+	end_point_id = get_endpoint_id(end_point);
+
+	/* Strip off the HTCA header that was added earlier */
+	mbox_request->buffer += HTCA_HEADER_LEN_MAX;
+
+	/* Prepare event frame to notify caller */
+	htca_frame_event(&event_info, mbox_request->buffer,
+			 mbox_request->buffer_length,
+			 mbox_request->actual_length,
+			 (status == HIF_OK) ? HTCA_OK : HTCA_ECANCELED,
+			 mbox_request->cookie);
+
+	/* Recycle the request */
+	spin_lock_irqsave(&end_point->mbox_queue_lock, flags);
+	htca_request_enq_tail(&end_point->send_free_queue,
+			      (struct htca_request *)mbox_request);
+	spin_unlock_irqrestore(&end_point->mbox_queue_lock, flags);
+	/* Regardless of success/failure, notify caller that HTCA is done
+	 * with his buffer.
+	 */
+	htca_dispatch_event(target, end_point_id, HTCA_EVENT_BUFFER_SENT,
+			    &event_info);
+}
+
+/* Request-specific callback invoked by the HTCA Completion Task
+ * when a Mbox Recv Request completes. Note: Used for Mbox Recv
+ * requests; not used for Reg requests.
+ *
+ * Simply dispatch a BUFFER_RECEIVED event to the originator
+ * of the request.
+ */
+void htca_recv_compl(struct htca_request *req, int status)
+{
+	struct htca_target *target;
+	struct htca_event_info event_info;
+	u8 end_point_id;
+	struct htca_endpoint *end_point;
+	struct htca_mbox_request *mbox_request =
+	    (struct htca_mbox_request *)req;
+	unsigned long flags;
+
+	end_point = mbox_request->end_point;
+	target = end_point->target;
+	end_point_id = get_endpoint_id(end_point);
+
+	/* Signaling:
+	 * Now that we have consumed recv data, clear rx_frame_length so that
+	 * htca_manage_pending_recvs will not try to re-read the same data.
+	 *
+	 * Set need_register_refresh so we can determine whether or not there
+	 * is additional data waiting to be read.
+	 *
+	 * Clear our endpoint from the pending_recv_mask so
+	 * htca_manage_pending_recvs
+	 * is free to issue another read.
+	 *
+	 * Finally, poke the work_task.
+	 */
+	end_point->rx_frame_length = 0;
+	target->need_register_refresh = 1;
+	spin_lock_irqsave(&target->pending_op_lock, flags);
+	target->pending_recv_mask &= ~(1 << end_point_id);
+	spin_unlock_irqrestore(&target->pending_op_lock, flags);
+	htca_work_task_poke(target);
+
+	if (status == HIF_OK) {
+		u32 check_length;
+		/* Length coming from Target is always LittleEndian */
+		check_length = ((mbox_request->buffer[0] << 0) |
+				(mbox_request->buffer[1] << 8));
+		WARN_ON(mbox_request->actual_length != check_length);
+	}
+
+	/* Strip off header */
+	mbox_request->buffer += HTCA_HEADER_LEN_MAX;
+
+	htca_frame_event(&event_info, mbox_request->buffer,
+			 mbox_request->buffer_length,
+			 mbox_request->actual_length,
+			 (status == HIF_OK) ? HTCA_OK : HTCA_ECANCELED,
+			 mbox_request->cookie);
+
+	/* Recycle the request */
+	spin_lock_irqsave(&end_point->mbox_queue_lock, flags);
+	htca_request_enq_tail(&end_point->recv_free_queue,
+			      (struct htca_request *)mbox_request);
+	spin_unlock_irqrestore(&end_point->mbox_queue_lock, flags);
+
+	htca_dispatch_event(target, end_point_id, HTCA_EVENT_BUFFER_RECEIVED,
+			    &event_info);
+}
+
+/* Request-specific callback invoked when a register read/write
+ * request completes. reg_request structures are not used for
+ * register WRITE requests so there's not much to do for writes.
+ *
+ * Note: For Mbox Request completions see htca_send_compl
+ * and htca_recv_compl.
+ */
+
+/* Request-specific callback invoked by the HTCA Completion Task
+ * when a Reg Request completes. Note: Used for Reg requests;
+ * not used for Mbox requests.
+ */
+void htca_reg_compl(struct htca_request *req, int status)
+{
+	struct htca_target *target;
+	struct htca_reg_request *reg_request = (struct htca_reg_request *)req;
+	unsigned long flags;
+
+	if (WARN_ON(!reg_request))
+		return;
+
+	htcadebug("purpose=0x%x\n", reg_request->purpose);
+
+	/* Process async register read/write completion */
+
+	target = reg_request->req.target;
+	if (status != HIF_OK) {
+		/* Recycle the request */
+		reg_request->purpose = UNUSED_PURPOSE;
+		spin_lock_irqsave(&target->reg_queue_lock, flags);
+		htca_request_enq_tail(&target->reg_free_queue,
+				      (struct htca_request *)reg_request);
+		spin_unlock_irqrestore(&target->reg_queue_lock, flags);
+
+		/* A register read/write accepted by HIF
+		 * should never fail.
+		 */
+		WARN_ON(1);
+		return;
+	}
+
+	switch (reg_request->purpose) {
+	case INTR_REFRESH:
+		/* Target register state, including interrupt
+		 * registers, has been fetched.
+		 */
+		htca_register_refresh_compl(target, reg_request);
+		break;
+
+	case CREDIT_REFRESH:
+		htca_credit_refresh_compl(target, reg_request);
+		break;
+
+	case UPDATE_TARG_INTRS:
+	case UPDATE_TARG_AND_ENABLE_HOST_INTRS:
+		htca_update_intr_enbs_compl(target, reg_request);
+		break;
+
+	default:
+		WARN_ON(1); /* unhandled request type */
+		break;
+	}
+
+	/* Recycle this register read/write request */
+	reg_request->purpose = UNUSED_PURPOSE;
+	spin_lock_irqsave(&target->reg_queue_lock, flags);
+	htca_request_enq_tail(&target->reg_free_queue,
+			      (struct htca_request *)reg_request);
+	spin_unlock_irqrestore(&target->reg_queue_lock, flags);
+}
+
+/* After a Register Refresh, update tx_credits_to_reap for each end_point. */
+static void htca_update_tx_credits_to_reap(struct htca_target *target,
+					   struct htca_reg_request *reg_request)
+{
+	struct htca_endpoint *end_point;
+	int ep;
+
+	for (ep = 0; ep < HTCA_NUM_MBOX; ep++) {
+		end_point = &target->end_point[ep];
+
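+		/* Bit (4 + ep) of COUNTER_INT_STATUS is set when mailbox
+		 * "ep" has at least one tx credit waiting to be reaped,
+		 * hence the (0x10 << ep) mask.
+		 */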
+		if (reg_request->u.reg_table.status.counter_int_status &
+		    (0x10 << ep)) {
+			end_point->tx_credits_to_reap = true;
+		} else {
+			end_point->tx_credits_to_reap = false;
+		}
+	}
+}
+
+/* After a Register Refresh, update rx_frame_length for each end_point. */
+static void htca_update_rx_frame_lengths(struct htca_target *target,
+					 struct htca_reg_request *reg_request)
+{
+	struct htca_endpoint *end_point;
+	u32 rx_lookahead;
+	u32 frame_length;
+	int ep;
+
+	htcadebug("Enter\n");
+	for (ep = 0; ep < HTCA_NUM_MBOX; ep++) {
+		end_point = &target->end_point[ep];
+
+		if (end_point->rx_frame_length != 0) {
+			/* NB: Will be cleared in htca_recv_compl after
+			 * frame is read
+			 */
+			continue;
+		}
+
+		if (!(reg_request->u.reg_table.rx_lookahead_valid &
+		      (1 << ep))) {
+			continue;
+		}
+
+		/* The length of the incoming message is contained
+		 * in the first two (HTCA_HEADER_LEN) bytes in
+		 * LittleEndian order.
+		 *
+		 * This length does NOT include the HTCA header nor block
+		 * padding.
+		 */
+		rx_lookahead = reg_request->u.reg_table.rx_lookahead[ep];
+		frame_length = rx_lookahead & 0x0000ffff;
+
+		end_point->rx_frame_length = frame_length;
+		htcadebug("ep#%d : %d\n", ep,
+			  frame_length);
+	}
+}
+
+static unsigned int htca_debug_no_pending; /* debug only */
+
+/* Completion for a register refresh.
+ *
+ * Update rx_frame_length and tx_credits_to_reap info for
+ * each endpoint. Then handle all pending interrupts (or,
+ * if interrupts are currently masked at the Host, handle
+ * all interrupts that would be pending if interrupts
+ * were enabled).
+ *
+ * Called in the context of HIF's completion task whenever
+ * results from a register refresh are received.
+ */
+void htca_register_refresh_compl(struct htca_target *target,
+				 struct htca_reg_request *req)
+{
+	u8 host_int_status;
+	u8 pnd_enb_intrs; /* pending and enabled interrupts */
+	u8 pending_int;
+	u8 enabled_int;
+	unsigned long flags;
+
+	htcadebug("Enter\n");
+
+	if (WARN_ON(target->pending_register_refresh == 0))
+		return;
+
+	spin_lock_irqsave(&target->pending_op_lock, flags);
+	target->pending_register_refresh--;
+	spin_unlock_irqrestore(&target->pending_op_lock, flags);
+
+	htcadebug(
+	    "REGDUMP: hostis=0x%02x cpuis=0x%02x erris=0x%02x cntris=0x%02x\n",
+	    req->u.reg_table.status.host_int_status,
+	    req->u.reg_table.status.cpu_int_status,
+	    req->u.reg_table.status.err_int_status,
+	    req->u.reg_table.status.counter_int_status);
+	htcadebug(
+	    "mbox_frame=0x%02x lav=0x%02x la0=0x%08x la1=0x%08x la2=0x%08x la3=0x%08x\n",
+	    req->u.reg_table.mbox_frame, req->u.reg_table.rx_lookahead_valid,
+	    req->u.reg_table.rx_lookahead[0], req->u.reg_table.rx_lookahead[1],
+	    req->u.reg_table.rx_lookahead[2], req->u.reg_table.rx_lookahead[3]);
+
+	/* Update rx_frame_length for each endpoint */
+	htca_update_rx_frame_lengths(target, req);
+
+	/* Update tx_credits_to_reap */
+	htca_update_tx_credits_to_reap(target, req);
+
+	/* Process pending Target interrupts. */
+
+	/* Restrict attention to pending interrupts of interest */
+	host_int_status = req->u.reg_table.status.host_int_status;
+
+	/* Unexpected and unhandled */
+	if (WARN_ON(host_int_status & HOST_INT_STATUS_DRAGON_INT_MASK))
+		return;
+
+	/* Form software's idea of pending and enabled interrupts.
+	 * Start with ERRORs and CPU interrupts.
+	 */
+	pnd_enb_intrs = host_int_status &
+			(HOST_INT_STATUS_ERROR_MASK | HOST_INT_STATUS_CPU_MASK);
+
+	/* Software may have intended to enable/disable credit
+	 * counter interrupts; but we commit these updates to
+	 * Target hardware lazily, just before re-enabling
+	 * interrupts. So registers that we have now may not
+	 * reflect the intended state of interrupt enables.
+	 */
+
+	/* Based on software credit enable bits, update pnd_enb_intrs
+	 * (which is like a software copy of host_int_status) as if
+	 * all desired interrupt enables had been committed to HW.
+	 */
+	pending_int = req->u.reg_table.status.counter_int_status;
+	enabled_int = target->enb.counter_int_status_enb;
+	if (pending_int & enabled_int)
+		pnd_enb_intrs |= HOST_INT_STATUS_COUNTER_MASK;
+
+	/* Based on software recv data enable bits, update
+	 * pnd_enb_intrs AS IF all the interrupt enables had
+	 * been committed to HW.
+	 */
+	pending_int = host_int_status;
+	enabled_int = target->enb.int_status_enb;
+	pnd_enb_intrs |= (pending_int & enabled_int);
+
+	if (!pnd_enb_intrs) {
+		/* No enabled interrupts are pending. */
+		htca_debug_no_pending++;
+	}
+
+	/* Invoke specific handlers for each enabled and pending interrupt.
+	 * The goal of each service routine is to clear interrupts at the
+	 * source (on the Target).
+	 *
+	 * We deal with four types of interrupts in the HOST_INT_STATUS
+	 * summary register:
+	 * errors
+	 * This remains set until bits in ERROR_INT_STATUS are cleared
+	 *
+	 * CPU
+	 * This remains set until bits in CPU_INT_STATUS are cleared
+	 *
+	 * rx data available
+	 * These remain set as long as rx data is available. HW clears
+	 * the rx data available enable bits when receive buffers
+	 * are exhausted. If we exhaust Host-side received buffers, we
+	 * mask the rx data available interrupt.
+	 *
+	 * tx credits available
+	 * This remains set until all bits in COUNTER_INT_STATUS are
+	 * cleared by HW after Host SW reaps all credits on a mailbox.
+	 * If credits on an endpoint are sufficient, we mask the
+	 * corresponding COUNTER_INT_STATUS bit. We avoid "dribbling"
+	 * one credit at a time and instead reap them en masse.
+	 *
+	 * The HOST_INT_STATUS register is read-only; these bits are cleared
+	 * by HW when the underlying condition is cleared.
+	 */
+
+	if (HOST_INT_STATUS_ERROR_GET(pnd_enb_intrs))
+		htca_service_error_interrupt(target, req);
+
+	if (HOST_INT_STATUS_CPU_GET(pnd_enb_intrs))
+		htca_service_cpu_interrupt(target, req);
+
+	if (HOST_INT_STATUS_COUNTER_GET(pnd_enb_intrs))
+		htca_service_credit_counter_interrupt(target, req);
+
+	/* Always needed in order to at least unmask Host interrupts */
+	htca_work_task_poke(target);
+}
+
+/* Complete an update of interrupt enables. */
+void htca_update_intr_enbs_compl(struct htca_target *target,
+				 struct htca_reg_request *req)
+{
+	htcadebug("Enter\n");
+	if (req->purpose == UPDATE_TARG_AND_ENABLE_HOST_INTRS) {
+		/* NB: non-intuitive, but correct */
+
+		/* While waiting for rxdata and txcred
+		 * interrupts to be disabled at the Target,
+		 * we temporarily masked interrupts at
+		 * the Host. It is now safe to allow
+		 * interrupts (esp. ERROR and CPU) at
+		 * the Host.
+		 */
+		htcadebug("Unmasking\n");
+		hif_un_mask_interrupt(target->hif_handle);
+	}
+}
+
+/* Called to complete htca_credit_refresh_start.
+ *
+ * Ends a credit refresh cycle. Called after decrementing a
+ * credit counter register (many times in a row). HW atomically
+ * decrements the counter and returns the OLD value but HW will
+ * never reduce it below 0.
+ *
+ * Called in the context of the work_task when the credit counter
+ * decrement operation completes synchronously. Called in the
+ * context of the compl_task when the credit counter decrement
+ * operation completes asynchronously.
+ */
+void htca_credit_refresh_compl(struct htca_target *target,
+			       struct htca_reg_request *reg_request)
+{
+	struct htca_endpoint *end_point;
+	unsigned long flags;
+	int reaped;
+	int i;
+
+	/* A non-zero value indicates 1 credit reaped.
+	 * Typically, we will find monotonically descending
+	 * values that reach 0 with the remaining values
+	 * all zero. But we must scan the entire results
+	 * to handle the case where the Target just happened
+	 * to increment credits simultaneously with our
+	 * series of credit decrement operations.
+	 */
+	htcadebug("ep=%d\n", reg_request->epid);
+	end_point = &target->end_point[reg_request->epid];
+	reaped = 0;
+	for (i = 0; i < HTCA_TX_CREDITS_REAP_MAX; i++) {
+		htcadebug("|R0x%02x", reg_request->u.credit_dec_results[i]);
+		if (reg_request->u.credit_dec_results[i])
+			reaped++;
+	}
+
+	htcadebug("\nreaped %d credits on ep=%d\n", reaped, reg_request->epid);
+
+	spin_lock_irqsave(&end_point->tx_credit_lock, flags);
+	end_point->tx_credits_available += reaped;
+	end_point->tx_credit_refresh_in_progress = false;
+	spin_unlock_irqrestore(&end_point->tx_credit_lock, flags);
+
+	htca_work_task_poke(target);
+}
diff --git a/drivers/net/wireless/qca402x/htca_mbox/htca_mbox_events.c b/drivers/net/wireless/qca402x/htca_mbox/htca_mbox_events.c
new file mode 100644
index 0000000..d034277
--- /dev/null
+++ b/drivers/net/wireless/qca402x/htca_mbox/htca_mbox_events.c
@@ -0,0 +1,130 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/completion.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/spinlock_types.h>
+#include <linux/wait.h>
+
+#include "../hif_sdio/hif.h"
+#include "htca.h"
+#include "htca_mbox_internal.h"
+
+/* Host Target Communications Event Management */
+
+/* Protect all event tables -- global as well as per-endpoint. */
+static spinlock_t event_lock; /* protects all event tables */
+
+/* Mapping table for global events -- avail/unavail */
+static struct htca_event_table_element
+	global_event_table[HTCA_EVENT_GLOBAL_COUNT];
+
+struct htca_event_table_element *
+htca_event_id_to_event(struct htca_target *target,
+		       u8 end_point_id,
+		       u8 event_id)
+{
+	struct htca_event_table_element *ev = NULL;
+
+	/* is ep event */
+	if ((event_id >= HTCA_EVENT_EP_START) &&
+	    (event_id <= HTCA_EVENT_EP_END)) {
+		struct htca_endpoint *end_point;
+		int ep_evid;
+
+		ep_evid = event_id - HTCA_EVENT_EP_START;
+		end_point = &target->end_point[end_point_id];
+		ev = &end_point->endpoint_event_tbl[ep_evid];
+	/* is global event */
+	} else if ((event_id >= HTCA_EVENT_GLOBAL_START) &&
+		   (event_id <= HTCA_EVENT_GLOBAL_END)) {
+		int global_evid;
+
+		global_evid = event_id - HTCA_EVENT_GLOBAL_START;
+		ev = &global_event_table[global_evid];
+	} else {
+		WARN_ON(1); /* unknown event */
+	}
+
+	return ev;
+}
+
+void htca_dispatch_event(struct htca_target *target,
+			 u8 end_point_id,
+			 u8 event_id,
+			 struct htca_event_info *event_info)
+{
+	struct htca_event_table_element *ev;
+
+	ev = htca_event_id_to_event(target, end_point_id, event_id);
+	if (!ev) {
+		panic("BUG");
+		return;
+	}
+	if (ev->handler) {
+		htca_event_handler handler;
+		void *param;
+		unsigned long flags;
+
+		spin_lock_irqsave(&event_lock, flags);
+		handler = ev->handler;
+		param = ev->param;
+		spin_unlock_irqrestore(&event_lock, flags);
+
+		handler((void *)target, end_point_id, event_id,
+			event_info, param);
+	}
+}
+
+int htca_add_to_event_table(struct htca_target *target,
+			    u8 end_point_id,
+			    u8 event_id,
+			    htca_event_handler handler, void *param) {
+	struct htca_event_table_element *ev;
+	unsigned long flags;
+
+	ev = htca_event_id_to_event(target, end_point_id, event_id);
+	if (!ev)
+		return HTCA_ERROR;
+
+	spin_lock_irqsave(&event_lock, flags);
+	ev->handler = handler;
+	ev->param = param;
+	spin_unlock_irqrestore(&event_lock, flags);
+
+	return HTCA_OK;
+}
+
+int htca_remove_from_event_table(struct htca_target *target,
+				 u8 end_point_id,
+				 u8 event_id) {
+	struct htca_event_table_element *ev;
+	unsigned long flags;
+
+	ev = htca_event_id_to_event(target, end_point_id, event_id);
+	if (!ev)
+		return HTCA_ERROR;
+
+	spin_lock_irqsave(&event_lock, flags);
+	/* Clear event handler info */
+	memset(ev, 0, sizeof(*ev));
+	spin_unlock_irqrestore(&event_lock, flags);
+
+	return HTCA_OK;
+}
+
+/* Called once during module initialization */
+void htca_event_table_init(void)
+{
+	spin_lock_init(&event_lock);
+}
diff --git a/drivers/net/wireless/qca402x/htca_mbox/htca_mbox_internal.h b/drivers/net/wireless/qca402x/htca_mbox/htca_mbox_internal.h
new file mode 100644
index 0000000..b1c7c6b
--- /dev/null
+++ b/drivers/net/wireless/qca402x/htca_mbox/htca_mbox_internal.h
@@ -0,0 +1,581 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _HTCA_INTERNAL_H_
+#define _HTCA_INTERNAL_H_
+
+#include "mbox_host_reg.h"
+
+#if defined(DEBUG)
+#define htcadebug(fmt, a...) \
+	pr_err("htca %s:%d: " fmt, __func__, __LINE__, ##a)
+#else
+#define htcadebug(args...)
+#endif
+
+/* HTCA internal specific declarations and prototypes */
+
+/* Target-side SDIO/SPI (mbox) controller supplies 4 mailboxes */
+#define HTCA_NUM_MBOX 4
+
+/* Software supports at most this many Target devices */
+#define HTCA_NUM_DEVICES_MAX 2
+
+/* Maximum supported mailbox message size.
+ *
+ * Quartz' SDIO/SPI mailbox alias spaces are 2KB each; so changes
+ * would be required to exceed that. WLAN restricts packets to
+ * under 1500B.
+ */
+#define HTCA_MESSAGE_SIZE_MAX 2048
+
+#define HTCA_TARGET_RESPONSE_TIMEOUT 2000 /* in ms */
+
+/* The maximum number of credits that we will reap
+ * from the Target at one time.
+ */
+#define HTCA_TX_CREDITS_REAP_MAX 8
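+/* A credit refresh performs this many read-and-decrement operations on
+ * the counter register in a single request; each non-zero result byte
+ * indicates one reaped credit (see htca_credit_refresh_compl).
+ */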
+
+/* Mailbox address in SDIO address space */
+#define MBOX_BASE_ADDR 0x800 /* Start of MBOX alias spaces */
+#define MBOX_WIDTH 0x800     /* Width of each mailbox alias space */
+
+#define MBOX_START_ADDR(mbox) (MBOX_BASE_ADDR + ((mbox) * (MBOX_WIDTH)))
+
+/* The byte just before this causes an EndOfMessage interrupt to be generated */
+#define MBOX_END_ADDR(mbox) (MBOX_START_ADDR(mbox) + MBOX_WIDTH)
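+
+/* Example, using the values above: mailbox 2's alias space spans
+ * 0x1800..0x1FFF, and a write whose final byte lands at 0x1FFF
+ * (MBOX_END_ADDR(2) - 1) raises the EndOfMessage interrupt.
+ */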
+
+/* extended MBOX address for larger MBOX writes to MBOX 0 (not used) */
+#define MBOX0_EXTENDED_BASE_ADDR 0x2800
+#define MBOX0_EXTENDED_WIDTH (6 * 1024)
+
+/* HTCA message header */
+struct HTCA_header {
+	u16 total_msg_length;
+} __packed;
+
+#define HTCA_HEADER_LEN sizeof(struct HTCA_header)
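+
+/* Note: HTCA_HEADER_LEN equals HTCA_HEADER_LEN_MAX (2 bytes) in this
+ * implementation. On the wire, every message starts with
+ * total_msg_length in little-endian byte order (see the check in
+ * htca_recv_compl).
+ */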
+
+/* Populate an htca_event_info structure to be passed to
+ * a user's event handler.
+ */
+static inline void htca_frame_event(struct htca_event_info *event_info,
+				    u8 *buffer, size_t buffer_length,
+				    size_t actual_length, u32 status,
+				    void *cookie)
+{
+	if (event_info) {
+		event_info->buffer = buffer;
+		event_info->buffer_length = buffer_length;
+		event_info->actual_length = actual_length;
+		event_info->status = status;
+		event_info->cookie = cookie;
+	}
+}
+
+/* Global and endpoint-specific event tables use these to
+ * map an event ID --> handler + param.
+ */
+struct htca_event_table_element {
+	htca_event_handler handler;
+	void *param;
+};
+
+/* This layout MUST MATCH Target hardware layout! */
+struct htca_intr_status {
+	u8 host_int_status;
+	u8 cpu_int_status;
+	u8 err_int_status;
+	u8 counter_int_status;
+} __packed;
+
+/* This layout MUST MATCH Target hardware layout! */
+struct htca_intr_enables {
+	u8 int_status_enb;
+	u8 cpu_int_status_enb;
+	u8 err_status_enb;
+	u8 counter_int_status_enb;
+} __packed;
+
+/* The Register table contains Target SDIO/SPI interrupt/rxstatus
+ * registers used by HTCA. Rather than read particular registers,
+ * we use a bulk "register refresh" to read all at once.
+ *
+ * This layout MUST MATCH Target hardware layout!
+ */
+struct htca_register_table {
+	struct htca_intr_status status;
+
+	u8 mbox_frame;
+	u8 rx_lookahead_valid;
+	u8 hole[2];
+
+	/* Four lookahead bytes for each mailbox */
+	u32 rx_lookahead[HTCA_NUM_MBOX];
+} __packed;
+
+/* Two types of requests/responses are supported:
+ * "mbox requests" are messages or data which
+ * are sent to a Target mailbox
+ * "register requests" are to read/write Target registers
+ *
+ * Mbox requests are managed with a per-endpoint
+ * pending list and free list.
+ *
+ * Register requests are managed with a per-Target
+ * pending list and free list.
+ *
+ * A generic HTCA request -- one which is either an
+ * htca_mbox_request or an htca_reg_request is represented
+ * by an htca_request.
+ */
+
+/* Number of mbox_requests and reg_requests allocated initially.  */
+#define HTCA_MBOX_REQUEST_COUNT 16		   /* per mailbox */
+#define HTCA_REG_REQUEST_COUNT (4 * HTCA_NUM_MBOX) /* per target */
+
+/* An htca_request is at the start of a mbox_request structure
+ * and at the start of a reg_request structure.
+ *
+ * Specific request types may be cast to a generic htca_request
+ * (e.g. in order to invoke the completion callback function)
+ */
+struct htca_request {
+	/*struct htca_request*/ void *next; /* linkage */
+	struct htca_target *target;
+	void (*completion_cb)(struct htca_request *req, int status);
+	int status; /* completion status from HIF */
+};
+
+struct htca_endpoint; /* forward reference */
+
+/* Mailbox request -- a message or bulk data */
+struct htca_mbox_request {
+	struct htca_request req; /* Must be first -- (cast to htca_request) */
+
+	/* Caller-supplied cookie associated with this request */
+	void *cookie;
+
+	/* Pointer to the start of the buffer. In the transmit
+	 * direction this points to the start of the payload. In the
+	 * receive direction, however, the buffer when queued up
+	 * points to the start of the HTCA header but when returned
+	 * to the caller points to the start of the payload
+	 *
+	 * Note: buffer is set to NULL whenever this request is free.
+	 */
+	u8 *buffer;
+
+	/* length, in bytes, of the buffer */
+	u32 buffer_length;
+
+	/* length, in bytes, of the payload within the buffer */
+	u32 actual_length;
+
+	struct htca_endpoint *end_point;
+};
+
+/* Round up a value (e.g. length) to a power of 2 (e.g. block size).  */
+static inline u32 htca_round_up(u32 value, u32 pwrof2)
+{
+	return (((value) + (pwrof2) - 1) & ~((pwrof2) - 1));
+}
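+
+/* e.g. htca_round_up(100, 256) == 256; htca_round_up(300, 256) == 512 */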
+
+/* Indicates reasons that we might access Target register space */
+enum htca_req_purpose {
+	UNUSED_PURPOSE,
+	INTR_REFRESH,	/* Fetch latest interrupt/status registers */
+	CREDIT_REFRESH, /* Reap credits */
+	UPDATE_TARG_INTRS,
+	UPDATE_TARG_AND_ENABLE_HOST_INTRS,
+};
+
+/* Register read request -- used to read registers from SDIO/SPI space.
+ * Register writes are fire and forget; no completion is needed.
+ */
+struct htca_reg_request {
+	struct htca_request req; /* Must be first -- (cast to htca_request) */
+	u8 *buffer;	 /* register value(s) */
+	u32 length;
+
+	/* Indicates the purpose this request was made */
+	enum htca_req_purpose purpose;
+
+	/* Which endpoint this read is for.
+	 * Used when processing a completed credit refresh request.
+	 */
+	u8 epid; /* which endpoint ID [0..3] */
+
+	/* A read to Target register space returns
+	 * one specific Target register value OR
+	 * all values in the register_table OR
+	 * a special repeated read-and-dec from a credit register
+	 *
+	 * FUTURE: We could separate these into separate request
+	 * types in order to perhaps save a bit of space....
+	 * eliminate the union.
+	 */
+	union {
+		struct htca_intr_enables enb;
+		struct htca_register_table reg_table;
+		u8 credit_dec_results[HTCA_TX_CREDITS_REAP_MAX];
+	} u;
+};
+
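+/* Simple singly-linked FIFO of generic htca_requests: enqueue at the
+ * tail (htca_request_enq_tail), dequeue from the head
+ * (htca_request_deq_head). Each queue is guarded by its owner's lock
+ * (mbox_queue_lock, reg_queue_lock or compl_queue_lock).
+ */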
+struct htca_request_queue {
+	struct htca_request *head;
+	struct htca_request *tail;
+};
+
+#define HTCA_IS_QUEUE_EMPTY(q) (!((q)->head))
+
+/* List of Target registers in SDIO/SPI space which can be accessed by Host */
+enum target_registers {
+	UNUSED_REG = 0,
+	INTR_ENB_REG = INT_STATUS_ENABLE_ADDRESS,
+	ALL_STATUS_REG = HOST_INT_STATUS_ADDRESS,
+	ERROR_INT_STATUS_REG = ERROR_INT_STATUS_ADDRESS,
+	CPU_INT_STATUS_REG = CPU_INT_STATUS_ADDRESS,
+	TX_CREDIT_COUNTER_DECREMENT_REG = COUNT_DEC_ADDRESS,
+	INT_TARGET_REG = INT_TARGET_ADDRESS,
+};
+
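+/* Map a register of interest to its address in SDIO/SPI space. The
+ * credit-decrement "register" is special: each mailbox has its own
+ * decrement address at COUNT_DEC_ADDRESS + (HTCA_NUM_MBOX + epid) * 4;
+ * for all other registers the epid argument is ignored.
+ */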
+static inline u32 get_reg_addr(enum target_registers which,
+			       u8 epid)
+{
+	return (((which) == TX_CREDIT_COUNTER_DECREMENT_REG)
+	     ? (COUNT_DEC_ADDRESS + (HTCA_NUM_MBOX + (epid)) * 4)
+	     : (which));
+}
+
+/* FUTURE: See if we can use lock-free operations
+ * to manage credits and linked lists.
+ * FUTURE: Use standard Linux queue ops; ESPECIALLY
+ * if they support lock-free operation.
+ */
+
+/* One of these per endpoint */
+struct htca_endpoint {
+	/* Enabled or Disabled */
+	bool enabled;
+
+	/* If data is available, rx_frame_length indicates
+	 * the length of the incoming message.
+	 */
+	u32 rx_frame_length; /* incoming frame length on this endpoint */
+	/* includes HTCA header */
+	/* Modified only by compl_task */
+
+	bool rx_data_alerted; /* Caller was sent a DATA_AVAILABLE event */
+	/* and has not supplied a new recv buffer */
+	/* since that warning was sent.  */
+	/* Modified only by work_task */
+
+	bool tx_credits_to_reap; /* At least one credit available at the */
+	/* Target waiting to be reaped. */
+	/* Modified only by compl_task */
+
+	/* Guards tx_credits_available and tx_credit_refresh_in_progress */
+	spinlock_t tx_credit_lock;
+
+	/* The number of credits that we have already reaped
+	 * from the Target. (i.e. we have decremented the Target's
+	 * count register so that we have ability to send future
+	 * messages). We have the ability to send tx_credits_available
+	 * messages without blocking.
+	 *
+	 * The size of a message is endpoint-dependent and always
+	 * a multiple of the device's block_size.
+	 */
+	u32 tx_credits_available;
+
+	/* Maximum message size */
+	u32 max_msg_sz;
+
+	/* Indicates that we are in the midst of a credit refresh cycle */
+	bool tx_credit_refresh_in_progress;
+
+	/* Free/Pending Send/Recv queues are used for mbox requests.
+	 * An mbox Send request cannot be given to HIF until we have
+	 * a tx credit. An mbox Recv request cannot be given to HIF
+	 * until we have a pending rx msg.
+	 *
+	 * The HIF layer maintains its own queue of requests, which
+	 * it uses to serialize access to SDIO. Its queue contains
+	 * a mixture of sends/recvs and mbox/reg requests. HIF is
+	 * "beyond" flow control so once a requets is given to HIF
+	 * it is guaranteed to complete (after all previous requests
+	 * complete).
+	 */
+
+	/* Guards Free/Pending send/recv queues */
+	spinlock_t mbox_queue_lock;
+	struct htca_request_queue send_free_queue;
+	struct htca_request_queue send_pending_queue;
+	struct htca_request_queue recv_free_queue;
+	struct htca_request_queue recv_pending_queue;
+
+	/* Inverse reference to the target */
+	struct htca_target *target;
+
+	/* Block size configured for the endpoint -- common across all endpoints
+	 */
+	u32 block_size;
+
+	/* Mapping table for per-endpoint events */
+	struct htca_event_table_element endpoint_event_tbl[HTCA_EVENT_EP_COUNT];
+
+	/* Location of the endpoint's mailbox space */
+	u32 mbox_start_addr;
+	u32 mbox_end_addr;
+};
+
+#define ENDPOINT_UNUSED 0
+
+/* Target interrupt states. */
+enum intr_state_e {
+	/* rxdata and txcred interrupts enabled.
+	 * Only the DSR context can switch us to
+	 * polled state.
+	 */
+	HTCA_INTERRUPT,
+
+	/* rxdata and txcred interrupts are disabled.
+	 * We are polling (via RegisterRefresh).
+	 * Only the work_task can switch us to
+	 * interrupt state.
+	 */
+	HTCA_POLL,
+};
+
+/* One of these per connected QCA402X device. */
+struct htca_target {
+	/* Target device is initialized and ready to go?
+	 * This has little to do with Host state;
+	 * it reflects readiness of the Target.
+	 */
+	bool ready;
+
+	/* Handle passed to HIF layer for SDIO/SPI Host controller access */
+	void *hif_handle; /* hif_device */
+
+	/* Per-endpoint info */
+	struct htca_endpoint end_point[HTCA_NUM_MBOX];
+
+	/* Used during startup while the Host waits for the
+	 * Target to initialize.
+	 */
+	wait_queue_head_t target_init_wait;
+
+	/* Free queue for htca_reg_requests.
+	 *
+	 * We don't need a regPendingQueue because reads/writes to
+	 * Target register space are not flow controlled by the Target.
+	 * There is no need to wait for credits in order to hand off a
+	 * register read/write to HIF.
+	 *
+	 * The register read/write may end up queued in a HIF queue
+	 * behind both register and mbox reads/writes that were
+	 * handed to HIF earlier. But they will never be queued
+	 * by HTCA.
+	 */
+	spinlock_t reg_queue_lock;
+	struct htca_request_queue reg_free_queue;
+
+	/* comp task synchronization */
+	struct mutex task_mutex;
+
+	struct task_struct *work_task;
+	struct task_struct *compl_task;
+
+	/* work_task synchronization */
+	wait_queue_head_t work_task_wait; /* wait for work to do */
+	bool work_task_has_work;	  /* work available? */
+	bool work_task_shutdown;	  /* requested stop? */
+	struct completion work_task_completion;
+
+	/* compl_task synchronization */
+	wait_queue_head_t compl_task_wait; /* wait for work to do */
+	bool compl_task_has_work;	   /* work available? */
+	bool compl_task_shutdown;	   /* requested stop? */
+	struct completion compl_cask_completion;
+
+	/* Queue of completed mailbox and register requests */
+	spinlock_t compl_queue_lock;
+	struct htca_request_queue compl_queue;
+
+	/* Software's shadow copy of interrupt enables.
+	 * Only the work_task changes intr_enable bits,
+	 * so no locking necessary.
+	 *
+	 * Committed to Target HW when
+	 * we convert from polling to interrupts or
+	 * we are using interrupts and enables have changed
+	 */
+	struct htca_intr_enables enb;
+	struct htca_intr_enables last_committed_enb;
+
+	enum intr_state_e intr_state;
+	int need_start_polling;
+
+	/* Set after we read data from a mailbox (to
+	 * update lookahead and mailbox status bits).
+	 * Used only by the work_task even though refreshes
+	 * may be started in other contexts.
+	 */
+	int need_register_refresh;
+
+	/* Guards pending_register_refresh and pending_recv_mask */
+	spinlock_t pending_op_lock;
+
+	/* Incremented when a RegisterRefresh is started;
+	 * Decremented when it completes.
+	 */
+	int pending_register_refresh;
+
+	/* Non-zero if a recv operation has been started
+	 * but not yet completed. 1 bit for each ep.
+	 */
+	int pending_recv_mask;
+};
+
+/* Convert an endpoint POINTER into an endpoint ID [0..3] */
+static inline u32 get_endpoint_id(struct htca_endpoint *ep)
+{
+	return (u32)(ep - ep->target->end_point);
+}
+
+void htca_receive_frame(struct htca_endpoint *end_point);
+
+u32 htca_get_frame_length(struct htca_endpoint *end_point);
+
+void htca_send_frame(struct htca_endpoint *end_point);
+
+void htca_send_blk_size(struct htca_endpoint *end_point);
+
+int htca_rw_completion_handler(void *req, int status);
+
+void htca_send_compl(struct htca_request *req, int status);
+
+void htca_recv_compl(struct htca_request *req, int status);
+
+void htca_reg_compl(struct htca_request *req, int status);
+
+int htca_target_inserted_handler(void *context,
+				 void *hif_handle);
+
+int htca_target_removed_handler(void *context, void *hif_handle);
+
+int htca_dsr_handler(void *target_ctxt);
+
+void htca_service_cpu_interrupt(struct htca_target *target,
+				struct htca_reg_request *req);
+
+void htca_service_error_interrupt(struct htca_target *target,
+				  struct htca_reg_request *req);
+
+void htca_service_credit_counter_interrupt(struct htca_target *target,
+					   struct htca_reg_request *req);
+
+void htca_enable_credit_counter_interrupt(struct htca_target *target,
+					  u8 end_point_id);
+
+void htca_disable_credit_counter_interrupt(struct htca_target *target,
+					   u8 end_point_id);
+
+int htca_add_to_mbox_queue(struct htca_mbox_request *queue,
+			   u8 *buffer,
+			   u32 buffer_length,
+			   u32 actual_length, void *cookie);
+
+struct htca_mbox_request *
+htca_remove_from_mbox_queue(struct htca_mbox_request *queue);
+
+void htca_mbox_queue_flush(struct htca_endpoint *end_point,
+			   struct htca_request_queue *pending_queue,
+			   struct htca_request_queue *free_queue,
+			   u8 event_id);
+
+int htca_add_to_event_table(struct htca_target *target,
+			    u8 end_point_id,
+			    u8 event_id,
+			    htca_event_handler handler,
+			    void *param);
+
+int htca_remove_from_event_table(struct htca_target *target,
+				 u8 end_point_id,
+				 u8 event_id);
+
+void htca_dispatch_event(struct htca_target *target,
+			 u8 end_point_id,
+			 u8 event_id,
+			 struct htca_event_info *event_info);
+
+struct htca_target *htca_target_instance(int i);
+
+void htca_target_instance_add(struct htca_target *target);
+
+void htca_target_instance_remove(struct htca_target *target);
+
+u8 htca_get_bit_num_set(u32 data);
+
+void htca_register_refresh(struct htca_target *target);
+
+void free_request(struct htca_request *req,
+		  struct htca_request_queue *queue);
+
+extern struct htca_target *htca_target_list[HTCA_NUM_DEVICES_MAX];
+
+int htca_work_task_start(struct htca_target *target);
+int htca_compl_task_start(struct htca_target *target);
+void htca_work_task_stop(struct htca_target *target);
+void htca_compl_task_stop(struct htca_target *target);
+void htca_work_task_poke(struct htca_target *target);
+void htca_compl_task_poke(struct htca_target *target);
+
+void htca_event_table_init(void);
+struct htca_event_table_element *
+htca_event_id_to_event(struct htca_target *target,
+		       u8 end_point_id,
+		       u8 event_id);
+
+void htca_request_enq_tail(struct htca_request_queue *queue,
+			   struct htca_request *req);
+struct htca_request *htca_request_deq_head(struct htca_request_queue *queue);
+
+void htca_register_refresh_start(struct htca_target *target);
+void htca_register_refresh_compl(struct htca_target *target,
+				 struct htca_reg_request *req);
+
+int htca_credit_refresh_start(struct htca_endpoint *end_point);
+void htca_credit_refresh_compl(struct htca_target *target,
+			       struct htca_reg_request *req);
+
+void htca_update_intr_enbs(struct htca_target *target,
+			   int enable_host_intrs);
+void htca_update_intr_enbs_compl(struct htca_target *target,
+				 struct htca_reg_request *req);
+
+bool htca_negotiate_config(struct htca_target *target);
+
+int htca_recv_request_to_hif(struct htca_endpoint *end_point,
+			     struct htca_mbox_request *mbox_request);
+int htca_send_request_to_hif(struct htca_endpoint *endpoint,
+			     struct htca_mbox_request *mbox_request);
+
+int htca_manage_pending_sends(struct htca_target *target, int epid);
+int htca_manage_pending_recvs(struct htca_target *target, int epid);
+
+void _htca_stop(struct htca_target *target);
+
+#endif /* _HTCA_INTERNAL_H_ */
diff --git a/drivers/net/wireless/qca402x/htca_mbox/htca_mbox_intr.c b/drivers/net/wireless/qca402x/htca_mbox/htca_mbox_intr.c
new file mode 100644
index 0000000..0486f59
--- /dev/null
+++ b/drivers/net/wireless/qca402x/htca_mbox/htca_mbox_intr.c
@@ -0,0 +1,627 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/completion.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock_types.h>
+#include <linux/wait.h>
+
+#include "../hif_sdio/hif.h"
+#include "htca.h"
+#include "htca_mbox_internal.h"
+
+/* Host Target Communications Interrupt Management */
+
+/* Interrupt Management
+ * When an interrupt occurs at the Host, it is to tell us about
+ * a high-priority error interrupt
+ * a CPU interrupt (TBD)
+ * rx data available
+ * tx credits available
+ *
+ * From an interrupt management perspective, rxdata and txcred
+ * interrupts are grouped together. When either of these occurs,
+ * we enter a mode where we repeatedly refresh register state
+ * and act on all interrupt information in the refreshed registers.
+ * We are basically polling with rxdata and txcred interrupts
+ * masked. Eventually, we refresh registers and find no rxdata
+ * and no txcred interrupts pending. At this point, we unmask
+ * those types of interrupts.
+ *
+ * Unmasking is selective: We unmask only the interrupts that
+ * we want to receive which include
+ * -rxdata interrupts for endpoints that have received
+ * buffers on the recv pending queue
+ * -txcred interrupts for endpoints with a very low
+ * count of creditsAvailable
+ * Other rxdata and txcred interrupts are masked. These include:
+ * -rxdata interrupts for endpoints that lack recv buffers
+ * -txcred interrupts for endpoints with lots of credits
+ *
+ * Very little activity takes place in the context of the
+ * interrupt function (Delayed Service Routine). We mask
+ * interrupts at the Host, send a command to disable all
+ * rxdata/txcred interrupts and finally start a register
+ * refresh. When the register refresh completes, we unmask
+ * interrupts on the Host and poke the work_task which now
+ * has valid register state to examine.
+ *
+ * The work_task repeatedly
+ * handles outstanding rx and tx service
+ * starts another register refresh
+ * Every time a register refresh completes, it pokes the
+ * work_task. This cycle continues until the work_task finds
+ * nothing to do after a register refresh. At this point,
+ * it unmasks rxdata/txcred interrupts at the Target (again,
+ * selectively).
+ *
+ * While in the work_task polling cycle, we maintain a notion
+ * of interrupt enables in software rather than commit these
+ * to Target HW.
+ *
+ *
+ * Credit State Machine:
+ * Credits are
+ * -Added by the Target whenever a Target-side receive
+ * buffer is added to a mailbox
+ * -Never rescinded by the Target
+ * -Reaped by this software after a credit refresh cycle
+ * which is initiated
+ * -as a result of a credit counter interrupt
+ * -after a send completes and the number of credits
+ * are below an acceptable threshold
+ * -used by this software when it hands a message to HIF to
+ * be sent to the Target
+ *
+ * The process of "reaping" credits involves first issuing
+ * a sequence of reads to the COUNTER_DEC register. (This is
+ * known as the start of a credit refresh.) We issue a large
+ * number of reads in order to reap as many credits at once
+ * as we can. When these reads complete, we determine how
+ * many credits were available and increase software's notion
+ * of tx_credits_available accordingly.
+ *
+ * Note: All Target reads/writes issued from the interrupt path
+ * should be asynchronous. HIF adds such a request to a queue
+ * and immediately returns.
+ *
+ * TBD: It might be helpful for HIF to support a "priority
+ * queue" -- requests that should be issued prior to anything
+ * in its normal queue. Even with this, a request might have
+ * to wait for a while as the current read/write request
+ * completes on SDIO and then wait for all prior priority
+ * requests to finish. So probably not worth the additional
+ * complexity.
+ */
+
+/* Maximum message sizes for each endpoint.
+ * Must be a multiple of the block size.
+ * Must be no greater than HTCA_MESSAGE_SIZE_MAX.
+ *
+ * TBD: These should be tunable. Example anticipated usage:
+ * ep0: Host-side networking control messages
+ * ep1: Host-side networking data messages
+ * ep2: OEM control messages
+ * ep3: OEM data messages
+ */
+static u32 htca_msg_size[HTCA_NUM_MBOX] = {256, 3 * 512, 512, 2048};
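+/* Illustrative note (block sizes are an assumption; the real values come
+ * from HIF at runtime): on an endpoint with a 512-byte block size the
+ * 1536 above is 3 blocks and 2048 is 4 blocks, while a byte-basis
+ * endpoint (block_size == 1) may use any size, e.g. 256.
+ */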
+
+/* Commit the shadow interrupt enables in software to
+ * Target Hardware. This is where the "lazy commit"
+ * occurs. Always called in the context of work_task.
+ *
+ * When the host's intr_state is POLL:
+ * -All credit count interrupts and all rx data interrupts
+ * are disabled at the Target.
+ *
+ * When the host's intr_state is INTERRUPT:
+ * -We commit the shadow copy of interrupt enables.
+ * -A mailbox with low credit count will have the credit
+ * interrupt enabled. A mailbox with high credit count
+ * will have the credit interrupt disabled.
+ * -A mailbox with no available receive buffers will have
+ * the mailbox data interrupt disabled. A mailbox with
+ * at least one receive buffer will have the mailbox
+ * data interrupt enabled.
+ */
+void htca_update_intr_enbs(struct htca_target *target,
+			   int enable_host_intrs)
+{
+	int status;
+	struct htca_reg_request *reg_request;
+	struct htca_intr_enables *enbregs;
+	unsigned long flags;
+	u32 address;
+
+	htcadebug("Enter: enable_host_intrs=%d\n",
+		  enable_host_intrs);
+	htcadebug("ints: 0x%02x  --> 0x%02x\n",
+		  target->last_committed_enb.int_status_enb,
+		  target->enb.int_status_enb);
+	htcadebug("cpu: 0x%02x	--> 0x%02x\n",
+		  target->last_committed_enb.cpu_int_status_enb,
+		  target->enb.cpu_int_status_enb);
+	htcadebug("error: 0x%02x  --> 0x%02x\n",
+		  target->last_committed_enb.err_status_enb,
+		  target->enb.err_status_enb);
+	htcadebug("counters: 0x%02x  --> 0x%02x\n",
+		  target->last_committed_enb.counter_int_status_enb,
+		  target->enb.counter_int_status_enb);
+	if ((target->enb.int_status_enb ==
+			target->last_committed_enb.int_status_enb) &&
+		(target->enb.counter_int_status_enb ==
+			target->last_committed_enb.counter_int_status_enb) &&
+		(target->enb.cpu_int_status_enb ==
+			target->last_committed_enb.cpu_int_status_enb) &&
+		(target->enb.err_status_enb ==
+			target->last_committed_enb.err_status_enb)) {
+		/* No changes to Target-side interrupt enables are required.
+		 * But we may still need to enable Host-side interrupts.
+		 */
+		if (enable_host_intrs) {
+			htcadebug("Unmasking - no change to Target enables\n");
+			hif_un_mask_interrupt(target->hif_handle);
+		}
+		return;
+	}
+
+	spin_lock_irqsave(&target->reg_queue_lock, flags);
+	reg_request = (struct htca_reg_request *)htca_request_deq_head(
+	    &target->reg_free_queue);
+	spin_unlock_irqrestore(&target->reg_queue_lock, flags);
+	if (!reg_request) {
+		WARN_ON(1);
+		return;
+	}
+	if (WARN_ON(reg_request->purpose != UNUSED_PURPOSE))
+		return;
+
+	reg_request->buffer = NULL;
+	reg_request->length = 0;
+	reg_request->epid = 0; /* unused */
+	enbregs = &reg_request->u.enb;
+
+	if (target->intr_state == HTCA_INTERRUPT) {
+		enbregs->int_status_enb = target->enb.int_status_enb;
+		enbregs->counter_int_status_enb =
+		    target->enb.counter_int_status_enb;
+	} else {
+		enbregs->int_status_enb = (target->enb.int_status_enb &
+					   ~HOST_INT_STATUS_MBOX_DATA_MASK);
+		enbregs->counter_int_status_enb = 0;
+	}
+
+	enbregs->cpu_int_status_enb = target->enb.cpu_int_status_enb;
+	enbregs->err_status_enb = target->enb.err_status_enb;
+
+	target->last_committed_enb = *enbregs; /* structure copy */
+
+	if (enable_host_intrs)
+		reg_request->purpose = UPDATE_TARG_AND_ENABLE_HOST_INTRS;
+	else
+		reg_request->purpose = UPDATE_TARG_INTRS;
+
+	address = get_reg_addr(INTR_ENB_REG, ENDPOINT_UNUSED);
+
+	status = hif_read_write(target->hif_handle, address, enbregs,
+				sizeof(*enbregs), HIF_WR_ASYNC_BYTE_INC,
+				reg_request);
+	if (status == HIF_OK && reg_request->req.completion_cb) {
+		reg_request->req.completion_cb(
+		    (struct htca_request *)reg_request, HIF_OK);
+		/* htca_update_intr_enbs_compl */
+	} else if (status == HIF_PENDING) {
+		/* Will complete later */
+	} else { /* HIF error */
+		WARN_ON(1);
+	}
+}
+
+/* Delayed Service Routine, invoked from HIF in thread context
+ * (from sdio's irqhandler) in order to handle interrupts
+ * caused by the Target.
+ *
+ * This serves as a top-level interrupt dispatcher for HTCA.
+ */
+int htca_dsr_handler(void *htca_handle)
+{
+	struct htca_target *target = (struct htca_target *)htca_handle;
+
+	htcadebug("Enter\n");
+	if (target->ready) {
+		/* Transition state to polling mode.
+		 * Temporarily disable intrs at Host
+		 * until interrupts are stopped in
+		 * Target HW.
+		 */
+		htcadebug("Masking interrupts\n");
+		hif_mask_interrupt(target->hif_handle);
+		target->need_start_polling = 1;
+
+		/* Kick off a register refresh so we
+		 * use updated registers in order to
+		 * figure out what needs to be serviced.
+		 *
+		 * RegisterRefresh completion wakes the
+		 * work_task which re-enables Host-side
+		 * interrupts.
+		 */
+		htca_register_refresh_start(target);
+	} else { /* startup time */
+		 /* Assumption is that we are receiving an interrupt
+		  * because the Target made a TX Credit available
+		  * on each endpoint (for configuration negotiation).
+		  */
+
+		hif_mask_interrupt(target->hif_handle);
+		if (htca_negotiate_config(target)) {
+			/* All endpoints are configured.
+			 * Target is now ready for normal operation.
+			 */
+			/* TBDXXX - Fix Quartz-side and remove this */
+			{
+				/* HACK: Signal Target to read mbox Cfg info.
+				 * TBD: Target should use EOM rather than an
+				 * explicit Target Interrupt for this.
+				 */
+				u8 my_targ_int;
+				u32 address;
+				int status;
+
+				/* Set HTCA_INT_TARGET_INIT_HOST_REQ */
+				my_targ_int = 1;
+
+				address =
+				    get_reg_addr(
+					INT_TARGET_REG, ENDPOINT_UNUSED);
+				status = hif_read_write(
+				    target->hif_handle, address, &my_targ_int,
+				    sizeof(my_targ_int), HIF_WR_SYNC_BYTE_INC,
+				    NULL);
+				if (WARN_ON(status != HIF_OK))
+					return status;
+			}
+			target->ready = true;
+			htcadebug("HTCA TARGET IS READY\n");
+			wake_up(&target->target_init_wait);
+		}
+		hif_un_mask_interrupt(target->hif_handle);
+	}
+	return HTCA_OK;
+}
+
+/* Handler for CPU interrupts that are explicitly
+ * initiated by Target firmware. Not used by system firmware today.
+ */
+void htca_service_cpu_interrupt(struct htca_target *target,
+				struct htca_reg_request *req)
+{
+	int status;
+	u32 address;
+	u8 cpu_int_status;
+
+	htcadebug("Enter\n");
+	cpu_int_status = req->u.reg_table.status.cpu_int_status &
+			 target->enb.cpu_int_status_enb;
+
+	/* Clear pending interrupts on Target -- Write 1 to Clear */
+	address = get_reg_addr(CPU_INT_STATUS_REG, ENDPOINT_UNUSED);
+
+	status =
+	    hif_read_write(target->hif_handle, address, &cpu_int_status,
+			   sizeof(cpu_int_status), HIF_WR_SYNC_BYTE_INC, NULL);
+
+	WARN_ON(status != HIF_OK);
+
+	/* Handle cpu_int_status actions here. None are currently used */
+}
+
+/* Handler for error interrupts on Target.
+ * If everything is working properly we hope never to see these.
+ */
+void htca_service_error_interrupt(struct htca_target *target,
+				  struct htca_reg_request *req)
+{
+	int status = HIF_ERROR;
+	u32 address;
+	u8 err_int_status;
+	struct htca_endpoint *end_point;
+
+	htcadebug("Enter\n");
+	err_int_status =
+	    req->u.reg_table.status.err_int_status & target->enb.err_status_enb;
+
+	end_point = &target->end_point[req->epid];
+	htcadebug("epid=%d txCreditsAvailable=%d\n",
+		  (int)req->epid, end_point->tx_credits_available);
+	htcadebug("statusregs host=0x%02x cpu=0x%02x err=0x%02x cnt=0x%02x\n",
+		  req->u.reg_table.status.host_int_status,
+		  req->u.reg_table.status.cpu_int_status,
+		  req->u.reg_table.status.err_int_status,
+		  req->u.reg_table.status.counter_int_status);
+
+	/* Clear pending interrupts on Target -- Write 1 to Clear */
+	address = get_reg_addr(ERROR_INT_STATUS_REG, ENDPOINT_UNUSED);
+	status =
+	    hif_read_write(target->hif_handle, address, &err_int_status,
+			   sizeof(err_int_status), HIF_WR_SYNC_BYTE_INC, NULL);
+
+	if (WARN_ON(status != HIF_OK))
+		return;
+
+	if (ERROR_INT_STATUS_WAKEUP_GET(err_int_status)) {
+		/* Wakeup */
+		htcadebug("statusregs host=0x%x\n",
+			  ERROR_INT_STATUS_WAKEUP_GET(err_int_status));
+		/* Nothing needed here */
+	}
+
+	if (ERROR_INT_STATUS_RX_UNDERFLOW_GET(err_int_status)) {
+		/* TBD: Rx Underflow */
+		/* Host posted a read to an empty mailbox? */
+		/* Target DMA was not able to keep pace with Host reads? */
+		if (WARN_ON(2)) /* TBD */
+			return;
+	}
+
+	if (ERROR_INT_STATUS_TX_OVERFLOW_GET(err_int_status)) {
+		/* TBD: Tx Overflow */
+		/* Host posted a write to a mailbox with no credits? */
+		/* Target DMA was not able to keep pace with Host writes? */
+		if (WARN_ON(1)) /* TBD */
+			return;
+	}
+}
+
+/* Handler for Credit Counter interrupts from Target.
+ *
+ * This occurs when the number of credits available on a mailbox
+ * increases from 0 to non-zero. (i.e. when Target firmware queues a
+ * DMA Receive buffer to an endpoint that previously had no buffers.)
+ *
+ * This interrupt is masked when we have a sufficient number of
+ * credits available. It is unmasked only when we have reaped all
+ * available credits and are still below a desired threshold.
+ */
+void htca_service_credit_counter_interrupt(struct htca_target *target,
+					   struct htca_reg_request *req)
+{
+	struct htca_endpoint *end_point;
+	u8 counter_int_status;
+	u8 eps_with_credits;
+	int ep;
+
+	htcadebug("Enter\n");
+	counter_int_status = req->u.reg_table.status.counter_int_status;
+
+	/* Service the credit counter interrupt.
+	 * COUNTER bits [4..7] are used for credits on endpoints [0..3].
+	 */
+	eps_with_credits =
+	    counter_int_status & target->enb.counter_int_status_enb;
+	htcadebug("eps_with_credits=0x%02x\n", eps_with_credits);
+	htcadebug("counter_int_status=0x%02x\n", counter_int_status);
+	htcadebug("counter_int_status_enb=0x%02x\n",
+		  target->enb.counter_int_status_enb);
+
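+	/* COUNTER bit (4 + ep) set means endpoint ep has credits to reap,
+	 * hence the (0x10 << ep) test below.
+	 */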
+	for (ep = 0; ep < HTCA_NUM_MBOX; ep++) {
+		if (!(eps_with_credits & (0x10 << ep)))
+			continue;
+
+		end_point = &target->end_point[ep];
+
+		/* We need credits on this endpoint AND
+		 * the target tells us that there are some.
+		 * Start a credit refresh cycle on this
+		 * endpoint.
+		 */
+		(void)htca_credit_refresh_start(end_point);
+	}
+}
+
+/* Callback registered with HIF to be invoked when Target
+ * presence is first detected.
+ *
+ * Allocate memory for Target, endpoints, requests, etc.
+ */
+int htca_target_inserted_handler(void *unused_context,
+				 void *hif_handle)
+{
+	struct htca_target *target;
+	struct htca_endpoint *end_point;
+	int ep;
+	struct htca_event_info event_info;
+	struct htca_request_queue *send_free_queue, *recv_free_queue;
+	struct htca_request_queue *reg_queue;
+	u32 block_size[HTCA_NUM_MBOX];
+	struct cbs_from_hif htca_callbacks; /* Callbacks from HIF to HTCA */
+	int status = HTCA_OK;
+	int i;
+
+	htcadebug("Enter\n");
+
+	target = kzalloc(sizeof(*target), GFP_KERNEL);
+	if (!target)
+		return HTCA_ERROR;
+	/* target->ready = false; */
+
+	/* Give a handle to HIF for this target */
+	target->hif_handle = hif_handle;
+	hif_set_handle(hif_handle, (void *)target);
+
+	/* Register htca_callbacks from HIF */
+	memset(&htca_callbacks, 0, sizeof(htca_callbacks));
+	htca_callbacks.rw_completion_hdl = htca_rw_completion_handler;
+	htca_callbacks.dsr_hdl = htca_dsr_handler;
+	htca_callbacks.context = target;
+	(void)hif_attach(hif_handle, &htca_callbacks);
+
+	/* Get block sizes and start addresses for each mailbox */
+	hif_configure_device(hif_handle,
+			     HIF_DEVICE_GET_MBOX_BLOCK_SIZE, &block_size,
+			     sizeof(block_size));
+
+	/* Initial software copies of interrupt enables */
+	target->enb.int_status_enb =
+	    INT_STATUS_ENABLE_ERROR_MASK | INT_STATUS_ENABLE_CPU_MASK |
+	    INT_STATUS_ENABLE_COUNTER_MASK | INT_STATUS_ENABLE_MBOX_DATA_MASK;
+
+	/* All 8 CPU interrupts enabled */
+	target->enb.cpu_int_status_enb = CPU_INT_STATUS_ENABLE_BIT_MASK;
+
+	target->enb.err_status_enb = ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK |
+				     ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK;
+
+	/* credit counters in upper bits */
+	target->enb.counter_int_status_enb = COUNTER_INT_STATUS_ENABLE_BIT_MASK;
+
+	spin_lock_init(&target->reg_queue_lock);
+	spin_lock_init(&target->compl_queue_lock);
+	spin_lock_init(&target->pending_op_lock);
+	mutex_init(&target->task_mutex);
+
+	status = htca_work_task_start(target);
+	if (status != HTCA_OK)
+		goto done;
+
+	status = htca_compl_task_start(target);
+	if (status != HTCA_OK)
+		goto done;
+
+	/* Initialize the register request free list */
+	reg_queue = &target->reg_free_queue;
+	for (i = 0; i < HTCA_REG_REQUEST_COUNT; i++) {
+		struct htca_reg_request *reg_request;
+
+		/* Add a reg_request to the Reg Free Queue */
+		reg_request = kzalloc(sizeof(*reg_request), GFP_DMA);
+		reg_request->req.target = target;
+		reg_request->req.completion_cb = htca_reg_compl;
+
+		/* no lock required -- startup */
+		htca_request_enq_tail(reg_queue,
+				      (struct htca_request *)reg_request);
+	}
+
+	/* Initialize endpoints, mbox queues and event tables */
+	for (ep = 0; ep < HTCA_NUM_MBOX; ep++) {
+		end_point = &target->end_point[ep];
+
+		spin_lock_init(&end_point->tx_credit_lock);
+		spin_lock_init(&end_point->mbox_queue_lock);
+
+		end_point->tx_credits_available = 0;
+		end_point->max_msg_sz = htca_msg_size[ep];
+		end_point->rx_frame_length = 0;
+		end_point->tx_credits_to_reap = false;
+		end_point->target = target;
+		end_point->enabled = false;
+		end_point->block_size = block_size[ep];
+		end_point->mbox_start_addr = MBOX_START_ADDR(ep);
+		end_point->mbox_end_addr = MBOX_END_ADDR(ep);
+
+		/* Initialize per-endpoint queues */
+		end_point->send_pending_queue.head = NULL;
+		end_point->send_pending_queue.tail = NULL;
+		end_point->recv_pending_queue.head = NULL;
+		end_point->recv_pending_queue.tail = NULL;
+
+		send_free_queue = &end_point->send_free_queue;
+		recv_free_queue = &end_point->recv_free_queue;
+		for (i = 0; i < HTCA_MBOX_REQUEST_COUNT; i++) {
+			struct htca_mbox_request *mbox_request;
+
+			/* Add an mbox_request to the mbox SEND Free Queue */
+			mbox_request = kzalloc(sizeof(*mbox_request),
+					       GFP_KERNEL);
+			mbox_request->req.target = target;
+			mbox_request->req.completion_cb = htca_send_compl;
+			mbox_request->end_point = end_point;
+			htca_request_enq_tail(
+			    send_free_queue,
+			    (struct htca_request *)mbox_request);
+
+			/* Add an mbox_request to the mbox RECV Free Queue */
+			mbox_request = kzalloc(sizeof(*mbox_request),
+					       GFP_KERNEL);
+			mbox_request->req.target = target;
+			mbox_request->req.completion_cb = htca_recv_compl;
+			mbox_request->end_point = end_point;
+			htca_request_enq_tail(
+			    recv_free_queue,
+			    (struct htca_request *)mbox_request);
+		}
+	}
+
+	/* Target and endpoint structures are now completely initialized.
+	 * Add the target instance to the global list of targets.
+	 */
+	htca_target_instance_add(target);
+
+	/* Frame a TARGET_AVAILABLE event and send it to
+	 * the caller. Return the hif_device handle as a
+	 * parameter with the event.
+	 */
+	htca_frame_event(&event_info, (u8 *)hif_handle,
+			 hif_get_device_size(),
+			 hif_get_device_size(), HTCA_OK, NULL);
+	htca_dispatch_event(target, ENDPOINT_UNUSED,
+			    HTCA_EVENT_TARGET_AVAILABLE, &event_info);
+
+done:
+	return status;
+}
+
+/* Callback registered with HIF to be invoked when Target
+ * is removed
+ *
+ * Also see htca_stop
+ * Stop tasks
+ * Free memory for Target, endpoints, requests, etc.
+ *
+ * TBD: Not yet supported
+ */
+int htca_target_removed_handler(void *unused_context,
+				void *htca_handle)
+{
+	struct htca_target *target = (struct htca_target *)htca_handle;
+	struct htca_event_info event_info;
+	struct htca_endpoint *end_point;
+	int ep;
+
+	htcadebug("Enter\n");
+	if (!target)
+		return HTCA_ERROR;
+
+	/* Disable each of the endpoints to stop accepting requests. */
+	for (ep = 0; ep < HTCA_NUM_MBOX; ep++) {
+		end_point = &target->end_point[ep];
+		end_point->enabled = false;
+	}
+
+	/* Frame a TARGET_UNAVAILABLE event and send it to the host */
+	htca_frame_event(&event_info, NULL, 0, 0, HTCA_OK, NULL);
+	htca_dispatch_event(target, ENDPOINT_UNUSED,
+			    HTCA_EVENT_TARGET_UNAVAILABLE, &event_info);
+
+	/* TBD: call htca_stop? */
+	/* TBD: Must be sure that nothing is going on before we free. */
+	if (WARN_ON(1)) /* TBD */
+		return HTCA_ERROR;
+
+	/* Free everything allocated earlier, including target
+	 * structure and all request structures.
+	 */
+	/* TBD: kfree .... */
+
+	return HTCA_OK;
+}
diff --git a/drivers/net/wireless/qca402x/htca_mbox/htca_mbox_recv.c b/drivers/net/wireless/qca402x/htca_mbox/htca_mbox_recv.c
new file mode 100644
index 0000000..0d4eae8
--- /dev/null
+++ b/drivers/net/wireless/qca402x/htca_mbox/htca_mbox_recv.c
@@ -0,0 +1,205 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/completion.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/spinlock_types.h>
+#include <linux/wait.h>
+
+#include "../hif_sdio/hif.h"
+#include "htca.h"
+#include "htca_mbox_internal.h"
+
+/* If there is data available to read on the specified mailbox,
+ * pull a Mailbox Recv Request off of the PendingRecv queue
+ * and request HIF to pull data from the mailbox into the
+ * request's recv buffer.
+ *
+ * If we are not aware of data waiting on the endpoint, simply
+ * return. Note that our awareness is based on slightly stale
+ * data from Quartz registers. Upper layers insure that we are
+ * called shortly after data becomes available on an endpoint.
+ *
+ * If we exhaust receive buffers, disable the mailbox's interrupt
+ * until additional buffers are available.
+ *
+ * Returns 0 if no request was sent to HIF
+ * returns 1 if at least one request was sent to HIF
+ */
+int htca_manage_pending_recvs(struct htca_target *target, int epid)
+{
+	struct htca_endpoint *end_point;
+	struct htca_request_queue *recv_queue;
+	struct htca_mbox_request *mbox_request;
+	u32 rx_frame_length;
+	unsigned long flags;
+	int work_done = 0;
+
+	if (target->pending_recv_mask & (1 << epid)) {
+		/* Receive operation is already in progress on this endpoint */
+		return 0;
+	}
+
+	end_point = &target->end_point[epid];
+
+	/* Hand off requests as long as we have both
+	 * something to recv into
+	 * data waiting to be read on the mailbox
+	 */
+
+	/* rx_frame_length of 0 --> nothing waiting; otherwise, it's
+	 * the length of data waiting to be read, NOT including
+	 * HTCA header nor block padding.
+	 * the HTCA header or block padding.
+	rx_frame_length = end_point->rx_frame_length;
+
+	recv_queue = &end_point->recv_pending_queue;
+	if (HTCA_IS_QUEUE_EMPTY(recv_queue)) {
+		htcadebug("no recv buff for ep#%d\n", epid);
+		/* Not interested in rxdata interrupts
+		 * since we have no recv buffers.
+		 */
+		target->enb.int_status_enb &= ~(1 << epid);
+
+		if (rx_frame_length) {
+			struct htca_event_info event_info;
+
+			htcadebug("frame waiting (%d): %d\n",
+				  epid, rx_frame_length);
+			/* No buffer ready to receive but data
+			 * is ready. Alert the caller with a
+			 * DATA_AVAILABLE event.
+			 */
+			if (!end_point->rx_data_alerted) {
+				end_point->rx_data_alerted = true;
+
+				htca_frame_event(&event_info, NULL,
+						 rx_frame_length,
+						 0, HTCA_OK, NULL);
+
+				htca_dispatch_event(target, epid,
+						    HTCA_EVENT_DATA_AVAILABLE,
+						    &event_info);
+			}
+		}
+		return 0;
+	}
+
+	/* We have recv buffers available, so we are
+	 * interested in rxdata interrupts.
+	 */
+	target->enb.int_status_enb |= (1 << epid);
+	end_point->rx_data_alerted = false;
+
+	if (rx_frame_length == 0) {
+		htcadebug(
+		    "htca_manage_pending_recvs: buffer available (%d), but no data to recv\n",
+		    epid);
+		/* We have a buffer but there's nothing
+		 * available on the Target to read.
+		 */
+		return 0;
+	}
+
+	/* There is rxdata waiting and a buffer to read it into */
+
+	/* Pull the request buffer off the Pending Recv Queue */
+	spin_lock_irqsave(&end_point->mbox_queue_lock, flags);
+	mbox_request =
+	    (struct htca_mbox_request *)htca_request_deq_head(recv_queue);
+
+	spin_unlock_irqrestore(&end_point->mbox_queue_lock, flags);
+
+	if (!mbox_request)
+		goto done;
+
+	htcadebug("ep#%d receiving frame: %d bytes\n", epid, rx_frame_length);
+
+	spin_lock_irqsave(&target->pending_op_lock, flags);
+	target->pending_recv_mask |= (1 << epid);
+	spin_unlock_irqrestore(&target->pending_op_lock, flags);
+
+	/* Hand off this Mbox Recv request to HIF */
+	mbox_request->actual_length = rx_frame_length;
+	if (htca_recv_request_to_hif(end_point, mbox_request) == HTCA_ERROR) {
+		struct htca_event_info event_info;
+
+		/* TBD: Could requeue this at the HEAD of the
+		 * pending recv queue. Try again later?
+		 */
+
+		/* Frame an event to send to caller */
+		htca_frame_event(&event_info, mbox_request->buffer,
+				 mbox_request->buffer_length,
+				 mbox_request->actual_length, HTCA_ECANCELED,
+				 mbox_request->cookie);
+
+		/* Free the Mailbox request */
+		spin_lock_irqsave(&end_point->mbox_queue_lock, flags);
+		htca_request_enq_tail(&end_point->recv_free_queue,
+				      (struct htca_request *)mbox_request);
+		spin_unlock_irqrestore(&end_point->mbox_queue_lock, flags);
+
+		spin_lock_irqsave(&target->pending_op_lock, flags);
+		target->pending_recv_mask &= ~(1 << epid);
+		spin_unlock_irqrestore(&target->pending_op_lock, flags);
+
+		htca_dispatch_event(target, epid, HTCA_EVENT_BUFFER_RECEIVED,
+				    &event_info);
+		goto done;
+	} else {
+		work_done = 1;
+	}
+
+done:
+	return work_done;
+}
+
+int htca_recv_request_to_hif(struct htca_endpoint *end_point,
+			     struct htca_mbox_request *mbox_request)
+{
+	int status;
+	struct htca_target *target;
+	u32 padded_length;
+	u32 mbox_address;
+	u32 req_type;
+
+	target = end_point->target;
+
+	/* Adjust length for power-of-2 block size */
+	padded_length =
+	    htca_round_up(mbox_request->actual_length + HTCA_HEADER_LEN_MAX,
+			  end_point->block_size);
+
+	req_type = (end_point->block_size > 1) ? HIF_RD_ASYNC_BLOCK_INC
+					       : HIF_RD_ASYNC_BYTE_INC;
+
+	mbox_address = end_point->mbox_start_addr;
+
+	status = hif_read_write(target->hif_handle, mbox_address,
+				&mbox_request->buffer
+				[HTCA_HEADER_LEN_MAX - HTCA_HEADER_LEN],
+				padded_length, req_type, mbox_request);
+
+	if (status == HIF_OK && mbox_request->req.completion_cb) {
+		mbox_request->req.completion_cb(
+		    (struct htca_request *)mbox_request, HTCA_OK);
+		/* htca_recv_compl */
+	} else if (status == HIF_PENDING) {
+		/* Will complete later */
+	} else { /* HIF error */
+		return HTCA_ERROR;
+	}
+
+	return HTCA_OK;
+}
diff --git a/drivers/net/wireless/qca402x/htca_mbox/htca_mbox_send.c b/drivers/net/wireless/qca402x/htca_mbox/htca_mbox_send.c
new file mode 100644
index 0000000..ebccf72
--- /dev/null
+++ b/drivers/net/wireless/qca402x/htca_mbox/htca_mbox_send.c
@@ -0,0 +1,392 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/completion.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/spinlock_types.h>
+#include <linux/wait.h>
+
+#include "../hif_sdio/hif.h"
+#include "htca.h"
+#include "htca_mbox_internal.h"
+
+/* Decide when an endpoint is low on tx credits and we should
+ * initiate a credit refresh. If this is set very low, we may
+ * exhaust credits entirely and pause while we wait for credits
+ * to be reaped from the Target. If set very high we may end
+ * up spending excessive time trying to reap when nothing is
+ * available.
+ *
+ * TBD: We could make this something like a percentage of the
+ * most credits we've ever seen on this endpoint. Or make it
+ * a value that automatically adjusts -- increase by one whenever
+ * we exhaust credits; decrease by one whenever a CREDIT_REFRESH
+ * fails to reap any credits.
+ * For now, wait until credits are completely exhausted; then
+ * initiate a credit refresh cycle.
+ */
+#define HTCA_EP_CREDITS_ARE_LOW(_endp) ((_endp)->tx_credits_available == 0)
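+/* One possible refinement of the TBD above -- a fixed low-water mark
+ * rather than full exhaustion. Sketch only; the threshold of 2 is an
+ * arbitrary assumption and this variant is not enabled:
+ *
+ * #define HTCA_EP_CREDITS_ARE_LOW(_endp) \
+ *		((_endp)->tx_credits_available < 2)
+ */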
+
+/* Pull as many Mailbox Send Requests off of the PendingSend queue
+ * as we can (must have a credit for each send) and hand off the
+ * request to HIF.
+ *
+ * This function returns when we exhaust Send Requests OR when we
+ * exhaust credits.
+ *
+ * If we are low on credits, it starts a credit refresh cycle.
+ *
+ * Returns 0 if nothing was sent to HIF
+ * returns 1 if at least one request was sent to HIF
+ */
+int htca_manage_pending_sends(struct htca_target *target, int epid)
+{
+	struct htca_endpoint *end_point;
+	struct htca_request_queue *send_queue;
+	struct htca_mbox_request *mbox_request;
+	unsigned long flags;
+	u8 tx_credits_available;
+	int work_done = 0;
+
+	end_point = &target->end_point[epid];
+	send_queue = &end_point->send_pending_queue;
+
+	/* Transmit messages as long as we have both something to send
+	 * tx credits that permit us to send
+	 */
+	while (!HTCA_IS_QUEUE_EMPTY(send_queue)) {
+		spin_lock_irqsave(&end_point->tx_credit_lock, flags);
+		tx_credits_available = end_point->tx_credits_available;
+		if (tx_credits_available)
+			end_point->tx_credits_available--;
+		spin_unlock_irqrestore(&end_point->tx_credit_lock, flags);
+		htcadebug("(ep=%d) tx_credits_available=%d\n",
+			  epid, tx_credits_available);
+		if (!tx_credits_available) {
+			/* We exhausted tx credits */
+			break;
+		}
+
+		/* Get the request buffer from the Pending Send Queue */
+		spin_lock_irqsave(&end_point->mbox_queue_lock, flags);
+		mbox_request =
+		    (struct htca_mbox_request *)htca_request_deq_head(
+			send_queue);
+
+		spin_unlock_irqrestore(&end_point->mbox_queue_lock, flags);
+
+		if (!mbox_request)
+			break;
+
+		/* Hand off this Mbox Send request to HIF */
+		if (htca_send_request_to_hif(end_point, mbox_request) ==
+		    HTCA_ERROR) {
+			struct htca_event_info event_info;
+
+			/* TBD: Could requeue this at the HEAD of the
+			 * pending send queue. Try again later?
+			 */
+
+			/* Restore tx credit, since it was not used */
+			spin_lock_irqsave(&end_point->tx_credit_lock, flags);
+			end_point->tx_credits_available++;
+			spin_unlock_irqrestore(&end_point->tx_credit_lock,
+					       flags);
+
+			/* Frame an event to send to caller */
+			htca_frame_event(&event_info, mbox_request->buffer,
+					 mbox_request->buffer_length,
+					 mbox_request->actual_length,
+					 HTCA_ECANCELED,
+					 mbox_request->cookie);
+
+			/* Free the Mailbox request */
+			spin_lock_irqsave(&end_point->mbox_queue_lock, flags);
+			htca_request_enq_tail(
+			    &end_point->send_free_queue,
+			    (struct htca_request *)mbox_request);
+			spin_unlock_irqrestore(&end_point->mbox_queue_lock,
+					       flags);
+
+			htca_dispatch_event(
+			    target, epid, HTCA_EVENT_BUFFER_SENT, &event_info);
+			goto done;
+		}
+		work_done = 1;
+	}
+
+	htcadebug("ep=%d credsAvail=%d toReap=%d\n",
+		  epid, end_point->tx_credits_available,
+		  end_point->tx_credits_to_reap);
+	if (HTCA_EP_CREDITS_ARE_LOW(end_point)) {
+		target->enb.counter_int_status_enb |= (0x10 << epid);
+		if (end_point->tx_credits_to_reap)
+			htca_credit_refresh_start(end_point);
+	} else {
+		target->enb.counter_int_status_enb &= ~(0x10 << epid);
+	}
+
+done:
+	return work_done;
+}
+
+/* Send one send request to HIF.
+ *
+ * Called from the HTCA task while processing requests from
+ * an endpoint's pendingSendQueue.
+ *
+ * Note: May consider calling this in the context of a process
+ * submitting a new Send Request (i.e. when nothing else is
+ * pending and credits are available). This would save the
+ * cost of context switching to the HTCA Work Task; but it would
+ * require additional synchronization and would add some
+ * complexity. For the high throughput case this optimization
+ * would not help since we are likely to have requests
+ * pending which must be submitted to HIF in the order received.
+ */
+int htca_send_request_to_hif(struct htca_endpoint *end_point,
+			     struct htca_mbox_request *mbox_request)
+{
+	int status;
+	struct htca_target *target;
+	u32 padded_length;
+	u32 mbox_address;
+	u32 req_type;
+
+	target = end_point->target;
+
+	/* Adjust length for power-of-2 block size */
+	padded_length =
+	    htca_round_up(mbox_request->actual_length + HTCA_HEADER_LEN_MAX,
+			  end_point->block_size);
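+	/* Worked example (sizes assumed for illustration only): a 1000-byte
+	 * message with a 4-byte HTCA_HEADER_LEN_MAX reservation is 1004
+	 * bytes, rounded up to 1024 on a 512-byte block endpoint; on a
+	 * byte-basis endpoint (block_size == 1) no padding is added.
+	 */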
+
+	/* Prepend the message's actual length to the outgoing message.
+	 * Caller is REQUIRED to leave HTCA_HEADER_LEN_MAX bytes before
+	 * the message for this purpose (of which the first HTCA_HEADER_LEN
+	 * bytes are actually used).
+	 *
+	 * TBD: We may enhance HIF so that a single write request
+	 * may have TWO consecutive components: one for the HTCA header
+	 * and another for the payload. This would remove the burden
+	 * on callers to reserve space in their buffer for HTCA.
+	 *
+	 * TBD: Since the messaging layer sitting on top of HTCA may
+	 * have this same issue it may make sense to allow a Send
+	 * to pass in a "header buffer" along with a "payload buffer".
+	 * So two buffers (or more generally, a list of buffers)
+	 * rather than one on each call.  These buffers would be
+	 * guaranteed to be sent to HIF as a group and they would
+	 * be sent over SDIO back to back.
+	 */
+	mbox_request->buffer -= HTCA_HEADER_LEN_MAX;
+
+	if (HTCA_HEADER_LEN_MAX > HTCA_HEADER_LEN) {
+		/* Sanity: clear padding bytes, if used */
+		memset(&mbox_request->buffer[HTCA_HEADER_LEN], 0,
+		       HTCA_HEADER_LEN_MAX - HTCA_HEADER_LEN);
+	}
+	/* Target receives length in LittleEndian byte order
+	 * regardless of Host endianness.
+	 */
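+	/* (This open-coded store is functionally the same as
+	 * put_unaligned_le16() on the first two header bytes.)
+	 */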
+	mbox_request->buffer[0] = mbox_request->actual_length & 0xff;
+	mbox_request->buffer[1] = (mbox_request->actual_length >> 8) & 0xff;
+
+	req_type = (end_point->block_size > 1) ? HIF_WR_ASYNC_BLOCK_INC
+					       : HIF_WR_ASYNC_BYTE_INC;
+
+	/* Arrange for last byte of the message to generate an
+	 * EndOfMessage interrupt to the Target.
+	 */
+	mbox_address = end_point->mbox_end_addr - padded_length;
+
+	/* Send the request to HIF */
+	status = hif_read_write(target->hif_handle, mbox_address,
+				mbox_request->buffer, padded_length, req_type,
+				mbox_request);
+
+	if (status == HIF_OK && mbox_request->req.completion_cb) {
+		mbox_request->req.completion_cb(
+		    (struct htca_request *)mbox_request, HTCA_OK);
+		/* htcaSendCompletionCB */
+	} else if (status == HIF_PENDING) {
+		/* Will complete later */
+	} else { /* HIF error */
+		/* Restore mbox_request buffer */
+		mbox_request->buffer += HTCA_HEADER_LEN_MAX;
+		return HTCA_ERROR;
+	}
+
+	return HTCA_OK;
+}
+
+/* Start a credit refresh cycle. Credits will appear in
+ * end_point->tx_credits_available when this refresh completes.
+ *
+ * Called in the context of the work_task when we are unable
+ * to send any more requests because credits are exhausted.
+ * Also called from HIF completion's context when a credit
+ * interrupt occurs.
+ *
+ * TBD: Consider HTCA v2 features: Quartz FW can send
+ * in-band TX Credit hint
+ * RX Length hint
+ * interrupt status registers
+ * as opportunistic trailer(s) on an RX message.
+ * This increases code complexity but may reduce overhead
+ * since we may reduce the number of explicit SDIO register
+ * read operations which are relatively expensive "byte basis"
+ * operations.
+ */
+int htca_credit_refresh_start(struct htca_endpoint *end_point)
+{
+	u8 end_point_id;
+	int status;
+	struct htca_target *target;
+	struct htca_reg_request *reg_request;
+	unsigned long flags;
+	bool already_in_progress;
+	u32 address;
+
+	htcadebug("Enter\n");
+
+	spin_lock_irqsave(&end_point->tx_credit_lock, flags);
+	already_in_progress = end_point->tx_credit_refresh_in_progress;
+	end_point->tx_credit_refresh_in_progress = true;
+	spin_unlock_irqrestore(&end_point->tx_credit_lock, flags);
+
+	if (already_in_progress)
+		return 0;
+
+	target = end_point->target;
+	end_point_id = get_endpoint_id(end_point);
+	htcadebug("on endpoint %d\n", end_point_id);
+
+	spin_lock_irqsave(&target->reg_queue_lock, flags);
+	reg_request = (struct htca_reg_request *)htca_request_deq_head(
+	    &target->reg_free_queue);
+	spin_unlock_irqrestore(&target->reg_queue_lock, flags);
+
+	if (!reg_request) {
+		WARN_ON(1);
+		return 1;
+	}
+
+	if (WARN_ON(reg_request->purpose != UNUSED_PURPOSE))
+		return 1;
+
+	reg_request->buffer = NULL;
+	reg_request->length = 0;
+	reg_request->purpose = CREDIT_REFRESH;
+	reg_request->epid = end_point_id;
+
+	address = get_reg_addr(TX_CREDIT_COUNTER_DECREMENT_REG, end_point_id);
+
+	/* Note: reading many times FROM a FIXed register address, the
+	 * "atomic decrement address". The function htca_credit_refresh_compl
+	 * examines the results upon completion.
+	 */
+	status = hif_read_write(
+	    target->hif_handle, address, reg_request->u.credit_dec_results,
+	    HTCA_TX_CREDITS_REAP_MAX, HIF_RD_ASYNC_BYTE_FIX, reg_request);
+	if (status == HIF_OK && reg_request->req.completion_cb) {
+		reg_request->req.completion_cb(
+		    (struct htca_request *)reg_request, HIF_OK);
+		/* htca_credit_refresh_compl */
+	} else if (status == HIF_PENDING) {
+		/* Will complete later */
+	} else { /* HIF error */
+		WARN_ON(1);
+	}
+	return 1;
+}
+
+/* Used during Configuration Negotiation at startup
+ * to configure max message sizes for each endpoint.
+ *
+ * Returns true if all endpoints have been configured,
+ * by this pass and/or all earlier calls. (Typically
+ * there should be only a single call which enables
+ * all endpoints at once.)
+ *
+ * Returns false if at least one endpoint has not
+ * yet been configured.
+ */
+bool htca_negotiate_config(struct htca_target *target)
+{
+	int status;
+	struct htca_endpoint *end_point;
+	u32 address;
+	int enb_count = 0;
+	int ep;
+
+	htcadebug("Enter\n");
+
+	/* The Target should have posted 1 credit to
+	 * each endpoint by the time we reach here.
+	 */
+	for (ep = 0; ep < HTCA_NUM_MBOX; ep++) {
+		end_point = &target->end_point[ep];
+		if (end_point->enabled) {
+			/* This endpoint was already enabled */
+			enb_count++;
+			continue;
+		}
+		htcadebug("try epid=%d\n", ep);
+
+		address = get_reg_addr(TX_CREDIT_COUNTER_DECREMENT_REG, ep);
+		end_point->tx_credits_available = 0;
+		status =
+		    hif_read_write(target->hif_handle, address,
+				   (u8 *)&end_point->tx_credits_available,
+				   1, HIF_RD_SYNC_BYTE_FIX, NULL);
+		if (status != HIF_OK) {
+			htcadebug("DBG: address=0x%08x status=%d\n", address,
+				  status);
+		}
+		if (WARN_ON(status != HIF_OK))
+			return false;
+
+		if (!end_point->tx_credits_available) {
+			/* not yet ready -- no credit posted.  Odd case. */
+			continue;
+		}
+		if (WARN_ON(end_point->tx_credits_available != 1))
+			return false;
+
+		end_point->tx_credits_available--;
+
+		/* TBD: Tacitly assumes LittleEndian Host.
+		 * This -- rather than an explicit Host interrupt -- is
+		 * what should trigger Target to fetch blocksize.
+		 */
+		htcadebug("good to go epid=%d\n", ep);
+
+		/* "Negotiate" the message size for this endpoint by writing
+		 * the maximum message size (and trigger EOM).
+		 */
+		address =
+		    end_point->mbox_end_addr - sizeof(end_point->max_msg_sz);
+		status = hif_read_write(target->hif_handle, address,
+					(u8 *)&end_point->max_msg_sz,
+					sizeof(end_point->max_msg_sz),
+					HIF_WR_SYNC_BYTE_INC, NULL);
+		if (WARN_ON(status != HIF_OK))
+			return false;
+
+		end_point->enabled = true;
+		enb_count++;
+	}
+
+	htcadebug("enb_count=%d\n", enb_count);
+	return (enb_count == HTCA_NUM_MBOX);
+}
diff --git a/drivers/net/wireless/qca402x/htca_mbox/htca_mbox_task.c b/drivers/net/wireless/qca402x/htca_mbox/htca_mbox_task.c
new file mode 100644
index 0000000..6598cba
--- /dev/null
+++ b/drivers/net/wireless/qca402x/htca_mbox/htca_mbox_task.c
@@ -0,0 +1,340 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* Implementation of Host Target Communication tasks,
+ * WorkTask and compl_task, which are used to manage
+ * the Mbox Pending Queues.
+ *
+ * A mailbox Send request is queued in arrival order on
+ * a per-mailbox Send queue until a credit is available
+ * from the Target. Requests in this queue are
+ * waiting for the Target to provide tx credits (i.e. recv
+ * buffers on the Target-side).
+ *
+ * A mailbox Recv request is queued in arrival order on
+ * a per-mailbox Recv queue until a message is available
+ * to be read. So requests in this queue are waiting for
+ * the Target to provide rx data.
+ *
+ * htca_work_task dequeues requests from the SendPendingQueue
+ * (once credits are available) and dequeues requests from
+ * the RecvPendingQueue (once rx data is available) and
+ * hands them to HIF for processing.
+ *
+ * htca_compl_task handles completion processing after
+ * HIF completes a request.
+ *
+ * The main purpose of these tasks is to provide a
+ * suitable suspendable context for processing requests
+ * and completions.
+ */
+
+#include <linux/completion.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/spinlock_types.h>
+#include <linux/wait.h>
+
+#include "../hif_sdio/hif.h"
+#include "htca.h"
+#include "htca_mbox_internal.h"
+
+/* Wakeup the htca_work_task.
+ *
+ * Invoked whenever send/recv state changes:
+ * new Send buffer added to the send_pending_queue
+ * new Recv buffer added to the recv_pending_queue
+ * tx credits are reaped
+ * rx data available recognized
+ */
+void htca_work_task_poke(struct htca_target *target)
+{
+	target->work_task_has_work = true;
+	wake_up_interruptible_sync(&target->work_task_wait);
+}
+
+/* Body of the htca_work_task, which hands Send and
+ * Receive requests to HIF.
+ */
+static int htca_work_task_core(struct htca_target *target)
+{
+	int ep;
+	int work_done = 0;
+
+	/* TBD: We might consider alternative ordering policies here,
+	 * between Sends and Recvs and among mailboxes. The current
+	 * algorithm is simple.
+	 */
+
+	/* Process sends/recvs */
+	for (ep = 0; ep < HTCA_NUM_MBOX; ep++) {
+		htcadebug("Call (%d)\n", ep);
+		work_done += htca_manage_pending_sends(target, ep);
+		htcadebug("Call (%d)\n", ep);
+		work_done += htca_manage_pending_recvs(target, ep);
+	}
+
+	return work_done;
+}
+
+/* Only this work_task is permitted to update
+ * interrupt enables. That restriction eliminates
+ * complex race conditions.
+ */
+static int htca_work_task(void *param)
+{
+	struct htca_target *target = (struct htca_target *)param;
+
+	/* set_user_nice(current, -3); */
+	set_current_state(TASK_INTERRUPTIBLE);
+
+	for (;;) {
+		htcadebug("top of loop. intr_state=%d\n", target->intr_state);
+		/* Wait for htca_work_task_poke */
+		wait_event_interruptible(target->work_task_wait,
+					 target->work_task_has_work);
+
+		if (target->work_task_shutdown)
+			break; /* htcaTaskStop invoked */
+
+		if (!target->work_task_has_work)
+			break; /* exit, if this task was interrupted */
+
+		/* reset before we start work */
+		target->work_task_has_work = false;
+		barrier();
+
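+		/* The DSR masked Host interrupts and asked us to poll:
+		 * switch to POLL mode (Target-side data/credit interrupt
+		 * enables are withheld) and unmask the Host interrupt.
+		 */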
+		if (target->need_start_polling) {
+			/* reset for next time */
+			target->need_start_polling = 0;
+			target->intr_state = HTCA_POLL;
+			htca_update_intr_enbs(target, 1);
+		}
+
+		while (htca_work_task_core(target))
+			;
+
+		if (target->pending_recv_mask ||
+		    target->pending_register_refresh) {
+			continue;
+		}
+
+		/* When a Recv completes, it sets need_register_refresh=1
+		 * and pokes the work_task.
+		 *
+		 * We won't actually initiate a register refresh until
+		 * pending recvs on ALL eps have completed. This may
+		 * increase latency slightly but it increases efficiency
+		 * and reduces chatter which should improve throughput.
+		 * Note that even though we don't initiate the register
+		 * refresh immediately, SDIO is still 100% busy doing
+		 * useful work. The refresh is issued shortly after.
+		 */
+		if (target->need_register_refresh) {
+			/* Continue to poll. When the RegisterRefresh
+			 * completes, the WorkTask will be poked.
+			 */
+			target->need_register_refresh = 0;
+			htca_register_refresh_start(target);
+			continue;
+		}
+
+		/* If more work has arrived since we last checked,
+		 * make another pass.
+		 */
+		if (target->work_task_has_work)
+			continue;
+
+		/* As long as we are constantly refreshing register
+		 * state and reprocessing, there is no need to
+		 * enable interrupts. We are essentially POLLING for
+		 * interrupts anyway. But if
+		 * -we were in POLL mode and
+		 * -we have processed all outstanding sends/recvs and
+		 * -there are no PENDING recv operations and
+		 * -there is no pending register refresh (so
+		 * no recv operations have completed since the
+		 * last time we refreshed register state)
+		 * then we switch to INTERRUPT mode and re-enable
+		 * Target-side interrupts.
+		 *
+		 * We'll sleep until poked:
+		 * -DSR handler receives an interrupt
+		 * -application enqueues a new send/recv buffer
+		 * We must also UPDATE interrupt enables even if we
+		 * were already in INTERRUPT mode, since some bits
+		 * may have changed.
+		 */
+		if (target->intr_state == HTCA_POLL) {
+			target->intr_state = HTCA_INTERRUPT;
+			htca_update_intr_enbs(target, 0);
+		}
+	}
+	complete_and_exit(&target->work_task_completion, 0);
+
+	return 0;
+}
+
+int htca_work_task_start(struct htca_target *target)
+{
+	int status = HTCA_ERROR;
+
+	if (mutex_lock_interruptible(&target->task_mutex))
+		return HTCA_ERROR; /* interrupted */
+
+	if (target->work_task)
+		goto done; /* already started */
+
+	target->work_task = kthread_create(htca_work_task, target, "htcaWork");
+	if (!target->work_task)
+		goto done; /* Failed to create task */
+
+	target->work_task_shutdown = false;
+	init_waitqueue_head(&target->work_task_wait);
+	init_completion(&target->work_task_completion);
+	wake_up_process(target->work_task);
+	status = HTCA_OK;
+
+done:
+	mutex_unlock(&target->task_mutex);
+	return status;
+}
+
+void htca_work_task_stop(struct htca_target *target)
+{
+	if (mutex_lock_interruptible(&target->task_mutex))
+		return; /* interrupted */
+
+	if (!target->work_task)
+		goto done;
+
+	target->work_task_shutdown = true;
+	htca_work_task_poke(target);
+	wait_for_completion(&target->work_task_completion);
+	target->work_task = NULL;
+
+done:
+	mutex_unlock(&target->task_mutex);
+}
+
+/* Wakeup the compl_task.
+ * Invoked after adding a new completion to the compl_queue.
+ */
+void htca_compl_task_poke(struct htca_target *target)
+{
+	target->compl_task_has_work = true;
+	wake_up_interruptible_sync(&target->compl_task_wait);
+}
+
+static int htca_manage_compl(struct htca_target *target)
+{
+	struct htca_request *req;
+	unsigned long flags;
+
+	/* Pop a request from the completion queue */
+	spin_lock_irqsave(&target->compl_queue_lock, flags);
+	req = htca_request_deq_head(&target->compl_queue);
+	spin_unlock_irqrestore(&target->compl_queue_lock, flags);
+
+	if (!req)
+		return 0; /* nothing to do */
+
+	/* Invoke request's corresponding completion function */
+	if (req->completion_cb)
+		req->completion_cb(req, req->status);
+
+	return 1;
+}
+
+static int htca_compl_task(void *param)
+{
+	struct htca_target *target = (struct htca_target *)param;
+
+	/* set_user_nice(current, -3); */
+	set_current_state(TASK_INTERRUPTIBLE);
+
+	for (;;) {
+		/* Wait for htca_compl_task_poke */
+		wait_event_interruptible(target->compl_task_wait,
+					 target->compl_task_has_work);
+		if (target->compl_task_shutdown)
+			break; /* htcaTaskStop invoked */
+
+		if (!target->compl_task_has_work)
+			break; /* exit, if this task was interrupted */
+
+		/* reset before we start work */
+		target->compl_task_has_work = false;
+		barrier();
+
+		/* TBD: We could try to prioritize completions rather than
+		 * handle them strictly in order. Could use separate queues for
+		 * register completions and mailbox completion on each endpoint.
+		 * In general, completion processing is expected to be short
+		 * so this probably isn't worth the additional complexity.
+		 */
+		{
+			int did_work;
+
+			do {
+				did_work = htca_manage_compl(target);
+			} while (did_work);
+		}
+	}
+	complete_and_exit(&target->compl_cask_completion, 0);
+
+	return 0;
+}
+
+int htca_compl_task_start(struct htca_target *target)
+{
+	int status = HTCA_ERROR;
+
+	if (mutex_lock_interruptible(&target->task_mutex))
+		return HTCA_ERROR; /* interrupted */
+
+	if (target->compl_task)
+		goto done; /* already started */
+
+	target->compl_task =
+	    kthread_create(htca_compl_task, target, "htcaCompl");
+	if (!target->compl_task)
+		goto done; /* Failed to create task */
+
+	target->compl_task_shutdown = false;
+	init_waitqueue_head(&target->compl_task_wait);
+	init_completion(&target->compl_cask_completion);
+	wake_up_process(target->compl_task);
+	status = HTCA_OK;
+
+done:
+	mutex_unlock(&target->task_mutex);
+	return status;
+}
+
+void htca_compl_task_stop(struct htca_target *target)
+{
+	if (mutex_lock_interruptible(&target->task_mutex))
+		return; /* interrupted */
+
+	if (!target->compl_task)
+		goto done;
+
+	target->compl_task_shutdown = true;
+	htca_compl_task_poke(target);
+	wait_for_completion(&target->compl_cask_completion);
+	target->compl_task = NULL;
+
+done:
+	mutex_unlock(&target->task_mutex);
+}
diff --git a/drivers/net/wireless/qca402x/htca_mbox/htca_mbox_utils.c b/drivers/net/wireless/qca402x/htca_mbox/htca_mbox_utils.c
new file mode 100644
index 0000000..4cf137c
--- /dev/null
+++ b/drivers/net/wireless/qca402x/htca_mbox/htca_mbox_utils.c
@@ -0,0 +1,182 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/completion.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/spinlock_types.h>
+#include <linux/wait.h>
+
+#include "../hif_sdio/hif.h"
+#include "htca.h"
+#include "htca_mbox_internal.h"
+
+/* HTCA utility routines  */
+
+/* Invoked when shutting down */
+void htca_mbox_queue_flush(struct htca_endpoint *end_point,
+			   struct htca_request_queue *pending_queue,
+			   struct htca_request_queue *free_queue,
+			   u8 event_id)
+{
+	struct htca_event_info event_info;
+	u8 end_point_id;
+	struct htca_target *target;
+	struct htca_mbox_request *mbox_request;
+	unsigned long flags;
+
+	target = end_point->target;
+	end_point_id = get_endpoint_id(end_point);
+
+	spin_lock_irqsave(&end_point->mbox_queue_lock, flags);
+	for (;;) {
+		mbox_request =
+		    (struct htca_mbox_request *)htca_request_deq_head(
+			pending_queue);
+		spin_unlock_irqrestore(&end_point->mbox_queue_lock, flags);
+
+		if (!mbox_request)
+			break;
+
+		htca_frame_event(&event_info, mbox_request->buffer,
+				 mbox_request->buffer_length, 0, HTCA_ECANCELED,
+				 mbox_request->cookie);
+
+		htca_dispatch_event(target, end_point_id, event_id,
+				    &event_info);
+
+		/* Recycle the request */
+		spin_lock_irqsave(&end_point->mbox_queue_lock, flags);
+		htca_request_enq_tail(free_queue,
+				      (struct htca_request *)mbox_request);
+	}
+	spin_unlock_irqrestore(&end_point->mbox_queue_lock, flags);
+}
+
+struct htca_target *htca_target_instance(int i)
+{
+	return htca_target_list[i];
+}
+
+void htca_target_instance_add(struct htca_target *target)
+{
+	int i;
+
+	for (i = 0; i < HTCA_NUM_DEVICES_MAX; i++) {
+		if (!htca_target_list[i]) {
+			htca_target_list[i] = target;
+			break;
+		}
+	}
+	WARN_ON(i >= HTCA_NUM_DEVICES_MAX);
+}
+
+void htca_target_instance_remove(struct htca_target *target)
+{
+	int i;
+
+	for (i = 0; i < HTCA_NUM_DEVICES_MAX; i++) {
+		if (htca_target_list[i] == target) {
+			htca_target_list[i] = NULL;
+			break;
+		}
+	}
+	WARN_ON(i >= HTCA_NUM_DEVICES_MAX);
+}
+
+/* Add a request to the tail of a queue.
+ * Caller must handle any locking required.
+ * TBD: Use Linux queue support
+ */
+void htca_request_enq_tail(struct htca_request_queue *queue,
+			   struct htca_request *req)
+{
+	req->next = NULL;
+
+	if (queue->tail)
+		queue->tail->next = (void *)req;
+	else
+		queue->head = req;
+
+	queue->tail = req;
+}
+
+/* Remove a request from the start of a queue.
+ * Caller must handle any locking required.
+ * TBD: Use Linux queue support
+ * TBD: If allocation from a FREE queue fails, the caller may add more elements.
+ */
+struct htca_request *htca_request_deq_head(struct htca_request_queue *queue)
+{
+	struct htca_request *req;
+
+	req = queue->head;
+	if (!req)
+		return NULL;
+
+	queue->head = req->next;
+	if (!queue->head)
+		queue->tail = NULL;
+	req->next = NULL;
+
+	return req;
+}
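+
+/* Sketch of the "use Linux queue support" TBD above, assuming (not part
+ * of this driver) that struct htca_request gains a struct list_head
+ * member named "list" and each queue is represented by a struct
+ * list_head. Enqueue/dequeue would then reduce to:
+ *
+ *	list_add_tail(&req->list, queue);
+ *
+ *	req = list_first_entry_or_null(queue, struct htca_request, list);
+ *	if (req)
+ *		list_del(&req->list);
+ */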
+
+/* Start a Register Refresh cycle.
+ *
+ * Submits a request to fetch ALL relevant registers from Target.
+ * When this completes, we'll take actions based on the new
+ * register values.
+ */
+void htca_register_refresh_start(struct htca_target *target)
+{
+	int status;
+	struct htca_reg_request *reg_request;
+	u32 address;
+	unsigned long flags;
+
+	htcadebug("Enter\n");
+	spin_lock_irqsave(&target->reg_queue_lock, flags);
+	reg_request = (struct htca_reg_request *)htca_request_deq_head(
+	    &target->reg_free_queue);
+	spin_unlock_irqrestore(&target->reg_queue_lock, flags);
+	if (!reg_request) {
+		WARN_ON(1);
+		return;
+	}
+	if (WARN_ON(reg_request->purpose != UNUSED_PURPOSE))
+		return;
+
+	spin_lock_irqsave(&target->pending_op_lock, flags);
+	target->pending_register_refresh++;
+	spin_unlock_irqrestore(&target->pending_op_lock, flags);
+
+	reg_request->buffer = (u8 *)&reg_request->u.reg_table;
+	reg_request->length = sizeof(reg_request->u.reg_table);
+	reg_request->purpose = INTR_REFRESH;
+	reg_request->epid = 0; /* not used */
+
+	address = get_reg_addr(ALL_STATUS_REG, ENDPOINT_UNUSED);
+	status = hif_read_write(target->hif_handle, address,
+				&reg_request->u.reg_table,
+				sizeof(reg_request->u.reg_table),
+				HIF_RD_ASYNC_BYTE_INC, reg_request);
+	if (status == HIF_OK && reg_request->req.completion_cb) {
+		/* Read completed synchronously; invoke the completion
+		 * handler (htca_register_refresh_compl) directly.
+		 */
+		reg_request->req.completion_cb(
+		    (struct htca_request *)reg_request, HIF_OK);
+	} else if (status == HIF_PENDING) {
+		/* Read is in flight; HIF will signal completion later */
+	} else { /* HIF error */
+		WARN_ON(1);
+	}
+}
diff --git a/drivers/net/wireless/qca402x/htca_mbox/mbox_host_reg.h b/drivers/net/wireless/qca402x/htca_mbox/mbox_host_reg.h
new file mode 100644
index 0000000..81ce632
--- /dev/null
+++ b/drivers/net/wireless/qca402x/htca_mbox/mbox_host_reg.h
@@ -0,0 +1,412 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MBOX_HOST_REG_H_
+#define _MBOX_HOST_REG_H_
+
+/* TBD: REMOVE things that are not needed, especially Diag Window */
+
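+/* Naming convention used throughout this header: each register exposes an
+ * _ADDRESS/_OFFSET pair, and each register field exposes _MSB, _LSB and
+ * _MASK values plus _GET/_SET accessor macros.  Illustrative use, with a
+ * hypothetical variable holding a value read from HOST_INT_STATUS_ADDRESS:
+ *
+ *	u32 mbox_bits = HOST_INT_STATUS_MBOX_DATA_GET(host_int_status);
+ */
+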
+#define HOST_INT_STATUS_ADDRESS 0x00000400
+#define HOST_INT_STATUS_OFFSET 0x00000400
+#define HOST_INT_STATUS_ERROR_MSB 7
+#define HOST_INT_STATUS_ERROR_LSB 7
+#define HOST_INT_STATUS_ERROR_MASK 0x00000080
+#define HOST_INT_STATUS_ERROR_GET(x) \
+	(((x) & HOST_INT_STATUS_ERROR_MASK) >> HOST_INT_STATUS_ERROR_LSB)
+#define HOST_INT_STATUS_ERROR_SET(x) \
+	(((x) << HOST_INT_STATUS_ERROR_LSB) & HOST_INT_STATUS_ERROR_MASK)
+#define HOST_INT_STATUS_CPU_MSB 6
+#define HOST_INT_STATUS_CPU_LSB 6
+#define HOST_INT_STATUS_CPU_MASK 0x00000040
+#define HOST_INT_STATUS_CPU_GET(x) \
+	(((x) & HOST_INT_STATUS_CPU_MASK) >> HOST_INT_STATUS_CPU_LSB)
+#define HOST_INT_STATUS_CPU_SET(x) \
+	(((x) << HOST_INT_STATUS_CPU_LSB) & HOST_INT_STATUS_CPU_MASK)
+#define HOST_INT_STATUS_DRAGON_INT_MSB 5
+#define HOST_INT_STATUS_DRAGON_INT_LSB 5
+#define HOST_INT_STATUS_DRAGON_INT_MASK 0x00000020
+#define HOST_INT_STATUS_DRAGON_INT_GET(x) \
+	(((x) & HOST_INT_STATUS_DRAGON_INT_MASK) >> \
+	 HOST_INT_STATUS_DRAGON_INT_LSB)
+#define HOST_INT_STATUS_DRAGON_INT_SET(x) \
+	(((x) << HOST_INT_STATUS_DRAGON_INT_LSB) & \
+	 HOST_INT_STATUS_DRAGON_INT_MASK)
+#define HOST_INT_STATUS_COUNTER_MSB 4
+#define HOST_INT_STATUS_COUNTER_LSB 4
+#define HOST_INT_STATUS_COUNTER_MASK 0x00000010
+#define HOST_INT_STATUS_COUNTER_GET(x) \
+	(((x) & HOST_INT_STATUS_COUNTER_MASK) >> HOST_INT_STATUS_COUNTER_LSB)
+#define HOST_INT_STATUS_COUNTER_SET(x) \
+	(((x) << HOST_INT_STATUS_COUNTER_LSB) & HOST_INT_STATUS_COUNTER_MASK)
+#define HOST_INT_STATUS_MBOX_DATA_MSB 3
+#define HOST_INT_STATUS_MBOX_DATA_LSB 0
+#define HOST_INT_STATUS_MBOX_DATA_MASK 0x0000000f
+#define HOST_INT_STATUS_MBOX_DATA_GET(x) \
+	(((x) & HOST_INT_STATUS_MBOX_DATA_MASK) >> \
+						HOST_INT_STATUS_MBOX_DATA_LSB)
+#define HOST_INT_STATUS_MBOX_DATA_SET(x) \
+	(((x) << HOST_INT_STATUS_MBOX_DATA_LSB) & \
+	 HOST_INT_STATUS_MBOX_DATA_MASK)
+
+#define CPU_INT_STATUS_ADDRESS 0x00000401
+#define CPU_INT_STATUS_OFFSET 0x00000401
+#define CPU_INT_STATUS_BIT_MSB 7
+#define CPU_INT_STATUS_BIT_LSB 0
+#define CPU_INT_STATUS_BIT_MASK 0x000000ff
+#define CPU_INT_STATUS_BIT_GET(x) \
+	(((x) & CPU_INT_STATUS_BIT_MASK) >> CPU_INT_STATUS_BIT_LSB)
+#define CPU_INT_STATUS_BIT_SET(x) \
+	(((x) << CPU_INT_STATUS_BIT_LSB) & CPU_INT_STATUS_BIT_MASK)
+
+#define ERROR_INT_STATUS_ADDRESS 0x00000402
+#define ERROR_INT_STATUS_OFFSET 0x00000402
+#define ERROR_INT_STATUS_SPI_MSB 3
+#define ERROR_INT_STATUS_SPI_LSB 3
+#define ERROR_INT_STATUS_SPI_MASK 0x00000008
+#define ERROR_INT_STATUS_SPI_GET(x) \
+	(((x) & ERROR_INT_STATUS_SPI_MASK) >> ERROR_INT_STATUS_SPI_LSB)
+#define ERROR_INT_STATUS_SPI_SET(x) \
+	(((x) << ERROR_INT_STATUS_SPI_LSB) & ERROR_INT_STATUS_SPI_MASK)
+#define ERROR_INT_STATUS_WAKEUP_MSB 2
+#define ERROR_INT_STATUS_WAKEUP_LSB 2
+#define ERROR_INT_STATUS_WAKEUP_MASK 0x00000004
+#define ERROR_INT_STATUS_WAKEUP_GET(x) \
+	(((x) & ERROR_INT_STATUS_WAKEUP_MASK) >> ERROR_INT_STATUS_WAKEUP_LSB)
+#define ERROR_INT_STATUS_WAKEUP_SET(x) \
+	(((x) << ERROR_INT_STATUS_WAKEUP_LSB) & ERROR_INT_STATUS_WAKEUP_MASK)
+#define ERROR_INT_STATUS_RX_UNDERFLOW_MSB 1
+#define ERROR_INT_STATUS_RX_UNDERFLOW_LSB 1
+#define ERROR_INT_STATUS_RX_UNDERFLOW_MASK 0x00000002
+#define ERROR_INT_STATUS_RX_UNDERFLOW_GET(x) \
+	(((x) & ERROR_INT_STATUS_RX_UNDERFLOW_MASK) >> \
+	 ERROR_INT_STATUS_RX_UNDERFLOW_LSB)
+#define ERROR_INT_STATUS_RX_UNDERFLOW_SET(x) \
+	(((x) << ERROR_INT_STATUS_RX_UNDERFLOW_LSB) & \
+	 ERROR_INT_STATUS_RX_UNDERFLOW_MASK)
+#define ERROR_INT_STATUS_TX_OVERFLOW_MSB 0
+#define ERROR_INT_STATUS_TX_OVERFLOW_LSB 0
+#define ERROR_INT_STATUS_TX_OVERFLOW_MASK 0x00000001
+#define ERROR_INT_STATUS_TX_OVERFLOW_GET(x) \
+	(((x) & ERROR_INT_STATUS_TX_OVERFLOW_MASK) >> \
+	 ERROR_INT_STATUS_TX_OVERFLOW_LSB)
+#define ERROR_INT_STATUS_TX_OVERFLOW_SET(x) \
+	(((x) << ERROR_INT_STATUS_TX_OVERFLOW_LSB) & \
+	 ERROR_INT_STATUS_TX_OVERFLOW_MASK)
+
+#define COUNTER_INT_STATUS_ADDRESS 0x00000403
+#define COUNTER_INT_STATUS_OFFSET 0x00000403
+#define COUNTER_INT_STATUS_COUNTER_MSB 7
+#define COUNTER_INT_STATUS_COUNTER_LSB 0
+#define COUNTER_INT_STATUS_COUNTER_MASK 0x000000ff
+#define COUNTER_INT_STATUS_COUNTER_GET(x) \
+	(((x) & COUNTER_INT_STATUS_COUNTER_MASK) >> \
+	 COUNTER_INT_STATUS_COUNTER_LSB)
+#define COUNTER_INT_STATUS_COUNTER_SET(x) \
+	(((x) << COUNTER_INT_STATUS_COUNTER_LSB) & \
+	 COUNTER_INT_STATUS_COUNTER_MASK)
+
+#define MBOX_FRAME_ADDRESS 0x00000404
+#define MBOX_FRAME_OFFSET 0x00000404
+#define MBOX_FRAME_RX_EOM_MSB 7
+#define MBOX_FRAME_RX_EOM_LSB 4
+#define MBOX_FRAME_RX_EOM_MASK 0x000000f0
+#define MBOX_FRAME_RX_EOM_GET(x) \
+	(((x) & MBOX_FRAME_RX_EOM_MASK) >> MBOX_FRAME_RX_EOM_LSB)
+#define MBOX_FRAME_RX_EOM_SET(x) \
+	(((x) << MBOX_FRAME_RX_EOM_LSB) & MBOX_FRAME_RX_EOM_MASK)
+#define MBOX_FRAME_RX_SOM_MSB 3
+#define MBOX_FRAME_RX_SOM_LSB 0
+#define MBOX_FRAME_RX_SOM_MASK 0x0000000f
+#define MBOX_FRAME_RX_SOM_GET(x) \
+	(((x) & MBOX_FRAME_RX_SOM_MASK) >> MBOX_FRAME_RX_SOM_LSB)
+#define MBOX_FRAME_RX_SOM_SET(x) \
+	(((x) << MBOX_FRAME_RX_SOM_LSB) & MBOX_FRAME_RX_SOM_MASK)
+
+#define RX_LOOKAHEAD_VALID_ADDRESS 0x00000405
+#define RX_LOOKAHEAD_VALID_OFFSET 0x00000405
+#define RX_LOOKAHEAD_VALID_MBOX_MSB 3
+#define RX_LOOKAHEAD_VALID_MBOX_LSB 0
+#define RX_LOOKAHEAD_VALID_MBOX_MASK 0x0000000f
+#define RX_LOOKAHEAD_VALID_MBOX_GET(x) \
+	(((x) & RX_LOOKAHEAD_VALID_MBOX_MASK) >> RX_LOOKAHEAD_VALID_MBOX_LSB)
+#define RX_LOOKAHEAD_VALID_MBOX_SET(x) \
+	(((x) << RX_LOOKAHEAD_VALID_MBOX_LSB) & RX_LOOKAHEAD_VALID_MBOX_MASK)
+
+#define RX_LOOKAHEAD0_ADDRESS 0x00000408
+#define RX_LOOKAHEAD0_OFFSET 0x00000408
+#define RX_LOOKAHEAD0_DATA_MSB 7
+#define RX_LOOKAHEAD0_DATA_LSB 0
+#define RX_LOOKAHEAD0_DATA_MASK 0x000000ff
+#define RX_LOOKAHEAD0_DATA_GET(x) \
+	(((x) & RX_LOOKAHEAD0_DATA_MASK) >> RX_LOOKAHEAD0_DATA_LSB)
+#define RX_LOOKAHEAD0_DATA_SET(x) \
+	(((x) << RX_LOOKAHEAD0_DATA_LSB) & RX_LOOKAHEAD0_DATA_MASK)
+
+#define RX_LOOKAHEAD1_ADDRESS 0x0000040c
+#define RX_LOOKAHEAD1_OFFSET 0x0000040c
+#define RX_LOOKAHEAD1_DATA_MSB 7
+#define RX_LOOKAHEAD1_DATA_LSB 0
+#define RX_LOOKAHEAD1_DATA_MASK 0x000000ff
+#define RX_LOOKAHEAD1_DATA_GET(x) \
+	(((x) & RX_LOOKAHEAD1_DATA_MASK) >> RX_LOOKAHEAD1_DATA_LSB)
+#define RX_LOOKAHEAD1_DATA_SET(x) \
+	(((x) << RX_LOOKAHEAD1_DATA_LSB) & RX_LOOKAHEAD1_DATA_MASK)
+
+#define RX_LOOKAHEAD2_ADDRESS 0x00000410
+#define RX_LOOKAHEAD2_OFFSET 0x00000410
+#define RX_LOOKAHEAD2_DATA_MSB 7
+#define RX_LOOKAHEAD2_DATA_LSB 0
+#define RX_LOOKAHEAD2_DATA_MASK 0x000000ff
+#define RX_LOOKAHEAD2_DATA_GET(x) \
+	(((x) & RX_LOOKAHEAD2_DATA_MASK) >> RX_LOOKAHEAD2_DATA_LSB)
+#define RX_LOOKAHEAD2_DATA_SET(x) \
+	(((x) << RX_LOOKAHEAD2_DATA_LSB) & RX_LOOKAHEAD2_DATA_MASK)
+
+#define RX_LOOKAHEAD3_ADDRESS 0x00000414
+#define RX_LOOKAHEAD3_OFFSET 0x00000414
+#define RX_LOOKAHEAD3_DATA_MSB 7
+#define RX_LOOKAHEAD3_DATA_LSB 0
+#define RX_LOOKAHEAD3_DATA_MASK 0x000000ff
+#define RX_LOOKAHEAD3_DATA_GET(x) \
+	(((x) & RX_LOOKAHEAD3_DATA_MASK) >> RX_LOOKAHEAD3_DATA_LSB)
+#define RX_LOOKAHEAD3_DATA_SET(x) \
+	(((x) << RX_LOOKAHEAD3_DATA_LSB) & RX_LOOKAHEAD3_DATA_MASK)
+
+#define INT_STATUS_ENABLE_ADDRESS 0x00000418
+#define INT_STATUS_ENABLE_OFFSET 0x00000418
+#define INT_STATUS_ENABLE_ERROR_MSB 7
+#define INT_STATUS_ENABLE_ERROR_LSB 7
+#define INT_STATUS_ENABLE_ERROR_MASK 0x00000080
+#define INT_STATUS_ENABLE_ERROR_GET(x) \
+	(((x) & INT_STATUS_ENABLE_ERROR_MASK) >> INT_STATUS_ENABLE_ERROR_LSB)
+#define INT_STATUS_ENABLE_ERROR_SET(x) \
+	(((x) << INT_STATUS_ENABLE_ERROR_LSB) & INT_STATUS_ENABLE_ERROR_MASK)
+#define INT_STATUS_ENABLE_CPU_MSB 6
+#define INT_STATUS_ENABLE_CPU_LSB 6
+#define INT_STATUS_ENABLE_CPU_MASK 0x00000040
+#define INT_STATUS_ENABLE_CPU_GET(x) \
+	(((x) & INT_STATUS_ENABLE_CPU_MASK) >> INT_STATUS_ENABLE_CPU_LSB)
+#define INT_STATUS_ENABLE_CPU_SET(x) \
+	(((x) << INT_STATUS_ENABLE_CPU_LSB) & INT_STATUS_ENABLE_CPU_MASK)
+#define INT_STATUS_ENABLE_DRAGON_INT_MSB 5
+#define INT_STATUS_ENABLE_DRAGON_INT_LSB 5
+#define INT_STATUS_ENABLE_DRAGON_INT_MASK 0x00000020
+#define INT_STATUS_ENABLE_DRAGON_INT_GET(x) \
+	(((x) & INT_STATUS_ENABLE_DRAGON_INT_MASK) >> \
+	 INT_STATUS_ENABLE_DRAGON_INT_LSB)
+#define INT_STATUS_ENABLE_DRAGON_INT_SET(x) \
+	(((x) << INT_STATUS_ENABLE_DRAGON_INT_LSB) & \
+	 INT_STATUS_ENABLE_DRAGON_INT_MASK)
+#define INT_STATUS_ENABLE_COUNTER_MSB 4
+#define INT_STATUS_ENABLE_COUNTER_LSB 4
+#define INT_STATUS_ENABLE_COUNTER_MASK 0x00000010
+#define INT_STATUS_ENABLE_COUNTER_GET(x) \
+	(((x) & INT_STATUS_ENABLE_COUNTER_MASK) >> \
+						INT_STATUS_ENABLE_COUNTER_LSB)
+#define INT_STATUS_ENABLE_COUNTER_SET(x) \
+	(((x) << INT_STATUS_ENABLE_COUNTER_LSB) & \
+	 INT_STATUS_ENABLE_COUNTER_MASK)
+#define INT_STATUS_ENABLE_MBOX_DATA_MSB 3
+#define INT_STATUS_ENABLE_MBOX_DATA_LSB 0
+#define INT_STATUS_ENABLE_MBOX_DATA_MASK 0x0000000f
+#define INT_STATUS_ENABLE_MBOX_DATA_GET(x) \
+	(((x) & INT_STATUS_ENABLE_MBOX_DATA_MASK) >> \
+	 INT_STATUS_ENABLE_MBOX_DATA_LSB)
+#define INT_STATUS_ENABLE_MBOX_DATA_SET(x) \
+	(((x) << INT_STATUS_ENABLE_MBOX_DATA_LSB) & \
+	 INT_STATUS_ENABLE_MBOX_DATA_MASK)
+
+#define CPU_INT_STATUS_ENABLE_ADDRESS 0x00000419
+#define CPU_INT_STATUS_ENABLE_OFFSET 0x00000419
+#define CPU_INT_STATUS_ENABLE_BIT_MSB 7
+#define CPU_INT_STATUS_ENABLE_BIT_LSB 0
+#define CPU_INT_STATUS_ENABLE_BIT_MASK 0x000000ff
+#define CPU_INT_STATUS_ENABLE_BIT_GET(x) \
+	(((x) & CPU_INT_STATUS_ENABLE_BIT_MASK) >> \
+						CPU_INT_STATUS_ENABLE_BIT_LSB)
+#define CPU_INT_STATUS_ENABLE_BIT_SET(x) \
+	(((x) << CPU_INT_STATUS_ENABLE_BIT_LSB) & \
+	 CPU_INT_STATUS_ENABLE_BIT_MASK)
+
+#define ERROR_STATUS_ENABLE_ADDRESS 0x0000041a
+#define ERROR_STATUS_ENABLE_OFFSET 0x0000041a
+#define ERROR_STATUS_ENABLE_WAKEUP_MSB 2
+#define ERROR_STATUS_ENABLE_WAKEUP_LSB 2
+#define ERROR_STATUS_ENABLE_WAKEUP_MASK 0x00000004
+#define ERROR_STATUS_ENABLE_WAKEUP_GET(x) \
+	(((x) & ERROR_STATUS_ENABLE_WAKEUP_MASK) >> \
+	 ERROR_STATUS_ENABLE_WAKEUP_LSB)
+#define ERROR_STATUS_ENABLE_WAKEUP_SET(x) \
+	(((x) << ERROR_STATUS_ENABLE_WAKEUP_LSB) & \
+	 ERROR_STATUS_ENABLE_WAKEUP_MASK)
+#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_MSB 1
+#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB 1
+#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK 0x00000002
+#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_GET(x) \
+	(((x) & ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK) >> \
+	 ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB)
+#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_SET(x) \
+	(((x) << ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB) & \
+	 ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK)
+#define ERROR_STATUS_ENABLE_TX_OVERFLOW_MSB 0
+#define ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB 0
+#define ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK 0x00000001
+#define ERROR_STATUS_ENABLE_TX_OVERFLOW_GET(x) \
+	(((x) & ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK) >> \
+	 ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB)
+#define ERROR_STATUS_ENABLE_TX_OVERFLOW_SET(x) \
+	(((x) << ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB) & \
+	 ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK)
+
+#define COUNTER_INT_STATUS_ENABLE_ADDRESS 0x0000041b
+#define COUNTER_INT_STATUS_ENABLE_OFFSET 0x0000041b
+#define COUNTER_INT_STATUS_ENABLE_BIT_MSB 7
+#define COUNTER_INT_STATUS_ENABLE_BIT_LSB 0
+#define COUNTER_INT_STATUS_ENABLE_BIT_MASK 0x000000ff
+#define COUNTER_INT_STATUS_ENABLE_BIT_GET(x) \
+	(((x) & COUNTER_INT_STATUS_ENABLE_BIT_MASK) >> \
+	 COUNTER_INT_STATUS_ENABLE_BIT_LSB)
+#define COUNTER_INT_STATUS_ENABLE_BIT_SET(x) \
+	(((x) << COUNTER_INT_STATUS_ENABLE_BIT_LSB) & \
+	 COUNTER_INT_STATUS_ENABLE_BIT_MASK)
+
+#define COUNT_ADDRESS 0x00000420
+#define COUNT_OFFSET 0x00000420
+#define COUNT_VALUE_MSB 7
+#define COUNT_VALUE_LSB 0
+#define COUNT_VALUE_MASK 0x000000ff
+#define COUNT_VALUE_GET(x) (((x) & COUNT_VALUE_MASK) >> COUNT_VALUE_LSB)
+#define COUNT_VALUE_SET(x) (((x) << COUNT_VALUE_LSB) & COUNT_VALUE_MASK)
+
+#define COUNT_DEC_ADDRESS 0x00000440
+#define COUNT_DEC_OFFSET 0x00000440
+#define COUNT_DEC_VALUE_MSB 7
+#define COUNT_DEC_VALUE_LSB 0
+#define COUNT_DEC_VALUE_MASK 0x000000ff
+#define COUNT_DEC_VALUE_GET(x) \
+	(((x) & COUNT_DEC_VALUE_MASK) >> COUNT_DEC_VALUE_LSB)
+#define COUNT_DEC_VALUE_SET(x) \
+	(((x) << COUNT_DEC_VALUE_LSB) & COUNT_DEC_VALUE_MASK)
+
+#define SCRATCH_ADDRESS 0x00000460
+#define SCRATCH_OFFSET 0x00000460
+#define SCRATCH_VALUE_MSB 7
+#define SCRATCH_VALUE_LSB 0
+#define SCRATCH_VALUE_MASK 0x000000ff
+#define SCRATCH_VALUE_GET(x) (((x) & SCRATCH_VALUE_MASK) >> SCRATCH_VALUE_LSB)
+#define SCRATCH_VALUE_SET(x) (((x) << SCRATCH_VALUE_LSB) & SCRATCH_VALUE_MASK)
+
+#define FIFO_TIMEOUT_ADDRESS 0x00000468
+#define FIFO_TIMEOUT_OFFSET 0x00000468
+#define FIFO_TIMEOUT_VALUE_MSB 7
+#define FIFO_TIMEOUT_VALUE_LSB 0
+#define FIFO_TIMEOUT_VALUE_MASK 0x000000ff
+#define FIFO_TIMEOUT_VALUE_GET(x) \
+	(((x) & FIFO_TIMEOUT_VALUE_MASK) >> FIFO_TIMEOUT_VALUE_LSB)
+#define FIFO_TIMEOUT_VALUE_SET(x) \
+	(((x) << FIFO_TIMEOUT_VALUE_LSB) & FIFO_TIMEOUT_VALUE_MASK)
+
+#define FIFO_TIMEOUT_ENABLE_ADDRESS 0x00000469
+#define FIFO_TIMEOUT_ENABLE_OFFSET 0x00000469
+#define FIFO_TIMEOUT_ENABLE_SET_MSB 0
+#define FIFO_TIMEOUT_ENABLE_SET_LSB 0
+#define FIFO_TIMEOUT_ENABLE_SET_MASK 0x00000001
+#define FIFO_TIMEOUT_ENABLE_SET_GET(x) \
+	(((x) & FIFO_TIMEOUT_ENABLE_SET_MASK) >> FIFO_TIMEOUT_ENABLE_SET_LSB)
+#define FIFO_TIMEOUT_ENABLE_SET_SET(x) \
+	(((x) << FIFO_TIMEOUT_ENABLE_SET_LSB) & FIFO_TIMEOUT_ENABLE_SET_MASK)
+
+#define INT_WLAN_ADDRESS 0x00000472
+#define INT_TARGET_ADDRESS INT_WLAN_ADDRESS
+#define INT_WLAN_OFFSET 0x00000472
+#define INT_WLAN_VECTOR_MSB 7
+#define INT_WLAN_VECTOR_LSB 0
+#define INT_WLAN_VECTOR_MASK 0x000000ff
+#define INT_WLAN_VECTOR_GET(x) \
+	(((x) & INT_WLAN_VECTOR_MASK) >> INT_WLAN_VECTOR_LSB)
+#define INT_WLAN_VECTOR_SET(x) \
+	(((x) << INT_WLAN_VECTOR_LSB) & INT_WLAN_VECTOR_MASK)
+
+#define SPI_CONFIG_ADDRESS 0x00000480
+#define SPI_CONFIG_OFFSET 0x00000480
+#define SPI_CONFIG_SPI_RESET_MSB 4
+#define SPI_CONFIG_SPI_RESET_LSB 4
+#define SPI_CONFIG_SPI_RESET_MASK 0x00000010
+#define SPI_CONFIG_SPI_RESET_GET(x) \
+	(((x) & SPI_CONFIG_SPI_RESET_MASK) >> SPI_CONFIG_SPI_RESET_LSB)
+#define SPI_CONFIG_SPI_RESET_SET(x) \
+	(((x) << SPI_CONFIG_SPI_RESET_LSB) & SPI_CONFIG_SPI_RESET_MASK)
+#define SPI_CONFIG_INTERRUPT_ENABLE_MSB 3
+#define SPI_CONFIG_INTERRUPT_ENABLE_LSB 3
+#define SPI_CONFIG_INTERRUPT_ENABLE_MASK 0x00000008
+#define SPI_CONFIG_INTERRUPT_ENABLE_GET(x) \
+	(((x) & SPI_CONFIG_INTERRUPT_ENABLE_MASK) >> \
+	 SPI_CONFIG_INTERRUPT_ENABLE_LSB)
+#define SPI_CONFIG_INTERRUPT_ENABLE_SET(x) \
+	(((x) << SPI_CONFIG_INTERRUPT_ENABLE_LSB) & \
+	 SPI_CONFIG_INTERRUPT_ENABLE_MASK)
+#define SPI_CONFIG_TEST_MODE_MSB 2
+#define SPI_CONFIG_TEST_MODE_LSB 2
+#define SPI_CONFIG_TEST_MODE_MASK 0x00000004
+#define SPI_CONFIG_TEST_MODE_GET(x) \
+	(((x) & SPI_CONFIG_TEST_MODE_MASK) >> SPI_CONFIG_TEST_MODE_LSB)
+#define SPI_CONFIG_TEST_MODE_SET(x) \
+	(((x) << SPI_CONFIG_TEST_MODE_LSB) & SPI_CONFIG_TEST_MODE_MASK)
+#define SPI_CONFIG_DATA_SIZE_MSB 1
+#define SPI_CONFIG_DATA_SIZE_LSB 0
+#define SPI_CONFIG_DATA_SIZE_MASK 0x00000003
+#define SPI_CONFIG_DATA_SIZE_GET(x) \
+	(((x) & SPI_CONFIG_DATA_SIZE_MASK) >> SPI_CONFIG_DATA_SIZE_LSB)
+#define SPI_CONFIG_DATA_SIZE_SET(x) \
+	(((x) << SPI_CONFIG_DATA_SIZE_LSB) & SPI_CONFIG_DATA_SIZE_MASK)
+
+#define SPI_STATUS_ADDRESS 0x00000481
+#define SPI_STATUS_OFFSET 0x00000481
+#define SPI_STATUS_ADDR_ERR_MSB 3
+#define SPI_STATUS_ADDR_ERR_LSB 3
+#define SPI_STATUS_ADDR_ERR_MASK 0x00000008
+#define SPI_STATUS_ADDR_ERR_GET(x) \
+	(((x) & SPI_STATUS_ADDR_ERR_MASK) >> SPI_STATUS_ADDR_ERR_LSB)
+#define SPI_STATUS_ADDR_ERR_SET(x) \
+	(((x) << SPI_STATUS_ADDR_ERR_LSB) & SPI_STATUS_ADDR_ERR_MASK)
+#define SPI_STATUS_RD_ERR_MSB 2
+#define SPI_STATUS_RD_ERR_LSB 2
+#define SPI_STATUS_RD_ERR_MASK 0x00000004
+#define SPI_STATUS_RD_ERR_GET(x) \
+	(((x) & SPI_STATUS_RD_ERR_MASK) >> SPI_STATUS_RD_ERR_LSB)
+#define SPI_STATUS_RD_ERR_SET(x) \
+	(((x) << SPI_STATUS_RD_ERR_LSB) & SPI_STATUS_RD_ERR_MASK)
+#define SPI_STATUS_WR_ERR_MSB 1
+#define SPI_STATUS_WR_ERR_LSB 1
+#define SPI_STATUS_WR_ERR_MASK 0x00000002
+#define SPI_STATUS_WR_ERR_GET(x) \
+	(((x) & SPI_STATUS_WR_ERR_MASK) >> SPI_STATUS_WR_ERR_LSB)
+#define SPI_STATUS_WR_ERR_SET(x) \
+	(((x) << SPI_STATUS_WR_ERR_LSB) & SPI_STATUS_WR_ERR_MASK)
+#define SPI_STATUS_READY_MSB 0
+#define SPI_STATUS_READY_LSB 0
+#define SPI_STATUS_READY_MASK 0x00000001
+#define SPI_STATUS_READY_GET(x) \
+	(((x) & SPI_STATUS_READY_MASK) >> SPI_STATUS_READY_LSB)
+#define SPI_STATUS_READY_SET(x) \
+	(((x) << SPI_STATUS_READY_LSB) & SPI_STATUS_READY_MASK)
+
+#endif /* _MBOX_HOST_REG_H_ */